# ===========================================================================
# repo_name: 40223220/w16b_test
# path: static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testpatch.py
# copies: 739 | size: 53126 | license: agpl-3.0
# ===========================================================================
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest
from unittest.test.testmock import support
from unittest.test.testmock.support import SomeClass, is_instance
from unittest.mock import (
    NonCallableMock, CallableMixin, patch, sentinel,
    MagicMock, Mock, NonCallableMagicMock, _patch,
    DEFAULT, call, _get_target
)
builtin_string = 'builtins'
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
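# A sketch (hypothetical objects, not part of the tests below) of what
# _get_proxy builds: a get-only proxy forwards attribute reads to the
# wrapped object, while get_only=False also forwards writes and deletes:
#
#     p = _get_proxy(some_obj)                  # p.attr reads some_obj.attr
#     q = _get_proxy(some_obj, get_only=False)  # q.attr = 1 sets some_obj.attr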
# for use in the test
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
foo_name = '%s.Foo' % __name__
def function(a, b=Foo):
pass
class Container(object):
def __init__(self):
self.values = {}
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
self.values[name] = value
def __delitem__(self, name):
del self.values[name]
def __iter__(self):
return iter(self.values)
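# Container implements just enough of the mapping protocol for patch.dict
# to snapshot and restore it. A sketch of the intended use, mirroring the
# patch.dict tests further down (patch.dict also works as a context manager):
#
#     c = Container()
#     c['key'] = 'original'
#     with patch.dict(c, {'key': 'patched'}):
#         assert c['key'] == 'patched'
#     assert c['key'] == 'original'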
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
self.assertIsInstance(mock1, MagicMock,
"patch with two arguments did not create a mock")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
self.assertNotEqual(outerMock2, mock2, "unexpected value for mock2")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'frooble', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
return
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(thing)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(Something, get_only=False)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function(1)
function.assert_called_with(1)
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
patcher = patch(foo_name, new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_patch_object(self):
patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
Foo = type('Foo', (str,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
class Foo(object):
def bar_one(self):
return dict(the_dict)
def bar_two(self):
return dict(the_dict)
def test_one(self):
return dict(the_dict)
def test_two(self):
return dict(the_dict)
the_dict = {'key': 'original'}
Foo = patch.dict(the_dict, key='changed')(Foo)
foo = Foo()
self.assertEqual(foo.bar_one(), {'key': 'changed'})
self.assertEqual(foo.bar_two(), {'key': 'changed'})
self.assertEqual(foo.test_one(), {'key': 'original'})
self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
with patch('unittest.test.testmock.support', autospec=True) as m:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
sys.modules.pop('squizz', None)
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
squizz = Mock()
squizz.squozz = 6
sys.modules['squizz'] = squizz
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
def test_patch_propagates_exc_on_exit(self):
class holder:
exc_info = None, None, None
class custom_patch(_patch):
def __exit__(self, etype=None, val=None, tb=None):
_patch.__exit__(self, etype, val, tb)
holder.exc_info = etype, val, tb
stop = __exit__
def with_custom_patch(target):
getter, attribute = _get_target(target)
return custom_patch(
getter, attribute, DEFAULT, None, False, None,
None, None, {}
)
@with_custom_patch('squizz.squozz')
def test(mock):
raise RuntimeError
self.assertRaises(RuntimeError, test)
self.assertIs(holder.exc_info[0], RuntimeError)
self.assertIsNotNone(holder.exc_info[1],
'exception value not propagated')
self.assertIsNotNone(holder.exc_info[2],
'exception traceback not propagated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
unlink = os.unlink
chdir = os.chdir
path = os.path
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
if __name__ == '__main__':
unittest.main()

# ===========================================================================
# repo_name: marxin/youtube-dl
# path: youtube_dl/extractor/echomsk.py
# copies: 130 | size: 1315 | license: unlicense
# ===========================================================================
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class EchoMskIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?echo\.msk\.ru/sounds/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.echo.msk.ru/sounds/1464134.html',
        'md5': '2e44b3b78daff5b458e4dbc37f191f7c',
        'info_dict': {
            'id': '1464134',
            'ext': 'mp3',
            'title': 'Особое мнение - 29 декабря 2014, 19:08',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        audio_url = self._search_regex(
            r'<a rel="mp3" href="([^"]+)">', webpage, 'audio URL')

        title = self._html_search_regex(
            r'<a href="/programs/[^"]+" target="_blank">([^<]+)</a>',
            webpage, 'title')

        air_date = self._html_search_regex(
            r'(?s)<div class="date">(.+?)</div>',
            webpage, 'date', fatal=False, default=None)
        if air_date:
            air_date = re.sub(r'(\s)\1+', r'\1', air_date)
            if air_date:
                title = '%s - %s' % (title, air_date)

        return {
            'id': video_id,
            'url': audio_url,
            'title': title,
        }

# ===========================================================================
# repo_name: KousikaGanesh/purchaseandInventory
# path: openerp/addons/hr_holidays/report/hr_holidays_report.py
# copies: 52 | size: 2423 | license: agpl-3.0
# ===========================================================================
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp import tools
from openerp.osv import fields, osv


class hr_holidays_remaining_leaves_user(osv.osv):
    _name = "hr.holidays.remaining.leaves.user"
    _description = "Total holidays by type"
    _auto = False
    _columns = {
        'name': fields.char('Employee', size=64),
        'no_of_leaves': fields.integer('Remaining leaves'),
        'user_id': fields.many2one('res.users', 'User'),
        'leave_type': fields.char('Leave Type', size=64),
    }

    def init(self, cr):
        tools.drop_view_if_exists(cr, 'hr_holidays_remaining_leaves_user')
        cr.execute("""
            CREATE or REPLACE view hr_holidays_remaining_leaves_user as (
                SELECT
                    min(hrs.id) as id,
                    rr.name as name,
                    sum(hrs.number_of_days) as no_of_leaves,
                    rr.user_id as user_id,
                    hhs.name as leave_type
                FROM
                    hr_holidays as hrs, hr_employee as hre,
                    resource_resource as rr, hr_holidays_status as hhs
                WHERE
                    hrs.employee_id = hre.id and
                    hre.resource_id = rr.id and
                    hhs.id = hrs.holiday_status_id
                GROUP BY
                    rr.name, rr.user_id, hhs.name
            )
        """)

hr_holidays_remaining_leaves_user()
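# An illustrative SQL query against the view created above (column names
# taken from _columns):
#
#     SELECT name, leave_type, no_of_leaves
#     FROM hr_holidays_remaining_leaves_user
#     ORDER BY name, leave_type;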
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

# ===========================================================================
# repo_name: 4Quant/tensorflow
# path: tensorflow/tensorboard/scripts/serialize_tensorboard.py
# copies: 5 | size: 6341 | license: apache-2.0
# ===========================================================================
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Consume and serialize all of the data from a running TensorBoard instance.
This program connects to a live TensorBoard backend at given port, and saves
all of the data to local disk JSON in a predictable format.
This makes it easy to mock out the TensorBoard backend so that the frontend
may be tested in isolation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import os.path
import shutil
import threading
import urllib
import six
from six.moves import http_client
import tensorflow as tf
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
tf.flags.DEFINE_string('logdir', None, """the logdir to pass to the TensorBoard
backend; data will be read from this logdir for serialization.""")

tf.flags.DEFINE_string('target', None, """The directory where serialized data
will be written""")

tf.flags.DEFINE_boolean('overwrite', False, """Whether to remove and overwrite
TARGET if it already exists.""")

tf.flags.DEFINE_boolean(
    'purge_orphaned_data', True, 'Whether to purge data that '
    'may have been orphaned due to TensorBoard restarts. '
    'Disabling purge_orphaned_data can be used to debug data '
    'disappearance.')
BAD_CHARACTERS = "#%&{}\\/<>*? $!'\":@+`|="
def Url(route, params):
  """Take a route and query params, and produce the encoded url for that asset."""
  out = route
  if params:
    # sorting ensures a unique filename for each query
    sorted_params = sorted(six.iteritems(params))
    out += '?' + urllib.urlencode(sorted_params)
  return out


def Clean(s):
  """Clean a string so it can be used as a filepath."""
  for c in BAD_CHARACTERS:
    s = s.replace(c, '_')
  return s
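# For example (derived directly from the two helpers above): query params
# are sorted before encoding, and unsafe characters become underscores:
#
#     Url('images', {'tag': 't1', 'run': 'r1'})  # -> 'images?run=r1&tag=t1'
#     Clean('images?run=r1&tag=t1')              # -> 'images_run_r1_tag_t1'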
class TensorBoardStaticSerializer(object):
  """Serialize all the routes from a TensorBoard server to static json."""

  def __init__(self, connection, target_path):
    self.connection = connection
    EnsureDirectoryExists(os.path.join(target_path, 'data'))
    self.path = target_path

  def GetAndSave(self, url):
    """GET the given url. Serialize the result at a clean path version of url."""
    self.connection.request('GET', '/data/' + url)
    response = self.connection.getresponse()
    destination = self.path + '/data/' + Clean(url)

    if response.status != 200:
      raise IOError(url)

    content = response.read()

    with open(destination, 'w') as f:
      f.write(content)
    return content

  def GetRouteAndSave(self, route, params=None):
    """GET given route and params. Serialize the result. Return as JSON."""
    url = Url(route, params)
    return json.loads(self.GetAndSave(url))

  def Run(self):
    """Serialize everything from a TensorBoard backend."""
    # get the runs object, which is an index for every tag.
    runs = self.GetRouteAndSave('runs')

    # collect sampled data.
    self.GetRouteAndSave('scalars')

    # now let's just download everything!
    for run, tag_type_to_tags in six.iteritems(runs):
      for tag_type, tags in six.iteritems(tag_type_to_tags):
        try:
          if tag_type == 'graph':
            # in this case, tags is a bool which specifies if graph is present.
            if tags:
              self.GetRouteAndSave('graph', {'run': run})
          elif tag_type == 'images':
            for t in tags:
              images = self.GetRouteAndSave('images', {'run': run, 'tag': t})
              for im in images:
                url = 'individualImage?' + im['query']
                # pull down the images themselves.
                self.GetAndSave(url)
          else:
            for t in tags:
              # Save this, whatever it is :)
              self.GetRouteAndSave(tag_type, {'run': run, 'tag': t})
        except IOError as e:
          PrintAndLog('Retrieval failed for %s/%s/%s' % (tag_type, run, tags),
                      tf.logging.WARN)
          PrintAndLog('Got Exception: %s' % e, tf.logging.WARN)
          PrintAndLog('continuing...', tf.logging.WARN)
          continue
def EnsureDirectoryExists(path):
  if not os.path.exists(path):
    os.makedirs(path)


def PrintAndLog(msg, lvl=tf.logging.INFO):
  tf.logging.log(lvl, msg)
  print(msg)


def main(unused_argv=None):
  target = FLAGS.target
  logdir = FLAGS.logdir
  if not target or not logdir:
    PrintAndLog('Both --target and --logdir are required.', tf.logging.ERROR)
    return -1

  if os.path.exists(target):
    if FLAGS.overwrite:
      if os.path.isdir(target):
        shutil.rmtree(target)
      else:
        os.remove(target)
    else:
      PrintAndLog('Refusing to overwrite target %s without --overwrite' %
                  target, tf.logging.ERROR)
      return -2

  path_to_run = server.ParseEventFilesSpec(FLAGS.logdir)

  PrintAndLog('About to load Multiplexer. This may take some time.')
  multiplexer = event_multiplexer.EventMultiplexer(
      size_guidance=server.TENSORBOARD_SIZE_GUIDANCE,
      purge_orphaned_data=FLAGS.purge_orphaned_data)
  server.ReloadMultiplexer(multiplexer, path_to_run)

  PrintAndLog('Multiplexer load finished. Starting TensorBoard server.')
  s = server.BuildServer(multiplexer, 'localhost', 0)
  server_thread = threading.Thread(target=s.serve_forever)
  server_thread.daemon = True
  server_thread.start()
  connection = http_client.HTTPConnection('localhost', s.server_address[1])

  PrintAndLog('Server setup! Downloading data from the server.')
  x = TensorBoardStaticSerializer(connection, target)
  x.Run()
  PrintAndLog('Done downloading data.')
  connection.close()
  s.shutdown()
  s.server_close()


if __name__ == '__main__':
  tf.app.run()

# ===========================================================================
# repo_name: sonofeft/LazGUI
# path: lazgui/examples/factory_v2.py
# copies: 1 | size: 2749 | license: gpl-3.0
# ===========================================================================
"""
This example demonstrates proper use of the lazgui project.
"""
import sys
import os
sys.path.insert(0, os.path.abspath("../../")) # needed to find lazgui development version
from lazgui.gui_factory import *
Lay = get_layout(layout_type='vstack', Left=41, Height=0, Top=42, Width=0,
TopMargin=6, RightMargin=6, BottomMargin=6, LeftMargin=6)
Lay.add_widget( get_radiogroup(Items=None, ItemIndex=1) )
Lay.add_widget( get_label( widget_name='Get_Text', Caption='Label for Get_Text', BottomMargin=0) )
Lay.add_widget( get_button(widget_name='Do Wide Things', Width=100, TopMargin=0) )
Lay.add_widget( get_edit(widget_name='Get What', label_text='', initial_value='No Label') )
Lay.add_widget( get_edit(widget_name='Get Stuff',
label_text='Enter Stuff', initial_value='Has Label') )
# - - - -
LayH = get_layout(layout_type='hstack', Left=41, Height=0, Top=42, Width=0,
TopMargin=6, RightMargin=6, BottomMargin=6, LeftMargin=6)
LayH.add_widget( get_edit(widget_name='Get Other Stuff',
label_text='Enter Other Stuff', initial_value='LabeledEdit') )
LayH.add_widget( get_edit(edit_type='other', widget_name='GetValue',
initial_value='HLayout Wrapped', label_text='xxx') )
Lay.add_widget( get_panel(layout=LayH) )
# - - - -
Page = get_pagecontrol(Height=300, Width=400)
# - - - - -
LayTab1 = get_layout(layout_type='vstack', Left=0, Height=20, Top=0, Width=20,
TopMargin=6, RightMargin=6, BottomMargin=6, LeftMargin=6)
LayTab1.add_widget( get_edit(widget_name='Get What', label_text='', initial_value='No Label') )
LayTab1.add_widget( get_edit(widget_name='Get Stuff',
label_text='Enter Stuff', initial_value='Has Label') )
Tab1 = get_tabsheet(layout=LayTab1, Caption='My 1st Tab')
Page.add_tabsheet( Tab1 )
# - - - - -
LayTab2 = get_layout(layout_type='vstack', Left=0, Height=20, Top=0, Width=20,
TopMargin=6, RightMargin=6, BottomMargin=6, LeftMargin=6)
LayTab2.add_widget( get_edit(widget_name='Get What', label_text='', initial_value='No Label #2') )
LayTab2.add_widget( get_edit(widget_name='Get Stuff #2',
label_text='Enter Stuff #2', initial_value='Has Label #2') )
Tab2 = get_tabsheet(layout=LayTab2, Caption='My 2nd Tab')
Page.add_tabsheet( Tab2 )
# - - - - -
Lay.add_widget( Page )
Lay.summ_print()
print '='*55
F = get_form( form_name='MyForm1', layout=Lay,
Left=611, Height=240, Top=162, Width=320,
Caption=None, LCLVersion='1.6.0.4')
C = get_gui(project_name='ProjWhat', form1_obj=F)
C.save_project_files( path_name=r'D:\tmp\test_lazgui\v1', over_write_OK=True )

# ===========================================================================
# repo_name: futurulus/scipy
# path: scipy/signal/setup.py
# copies: 39 | size: 1230 | license: bsd-3-clause
# ===========================================================================
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import

from scipy._build_utils import numpy_nodepr_api


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('signal', parent_package, top_path)

    config.add_data_dir('tests')

    config.add_extension('sigtools',
                         sources=['sigtoolsmodule.c', 'firfilter.c',
                                  'medianfilter.c', 'lfilter.c.src',
                                  'correlate_nd.c.src'],
                         depends=['sigtools.h'],
                         include_dirs=['.'],
                         **numpy_nodepr_api)

    config.add_extension('_spectral', sources=['_spectral.c'])
    config.add_extension('_max_len_seq_inner', sources=['_max_len_seq_inner.c'])

    spline_src = ['splinemodule.c', 'S_bspline_util.c', 'D_bspline_util.c',
                  'C_bspline_util.c', 'Z_bspline_util.c', 'bspline_util.c']
    config.add_extension('spline', sources=spline_src, **numpy_nodepr_api)

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
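# This configuration is consumed by numpy.distutils; to build the extensions
# in place during development one would typically run (standard distutils
# usage, not specific to this file):
#
#     python setup.py build_ext --inplace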

# ===========================================================================
# repo_name: goyalsid/phageParser
# path: parserscripts/crisprfinder.py
# copies: 3 | size: 1292 | license: mit
# ===========================================================================
"""
This script has a non-native dependency on the selenium package. To run it
'headless' (without any browser window opening), the additional Python
selenium PhantomJS dependency is needed.

Instructions for headless operation:
- replace the 'driver = webdriver.Firefox()' line with
  'driver = webdriver.PhantomJS()'

Note: accessing the CRISPRfinder tool online for many files back to back
may get your IP address blocked if the website is set up to prevent
automated webcrawling. A potential work-around is to add a 'wait' or
'sleep' call between individual requests (e.g. for 50 ms).
"""
import os
import sys

from selenium import webdriver

genome_input = sys.argv[1]  # .fasta file format required
crispr_results_output = sys.argv[2]  # textfile with CRISPR results
# path of input/output files is relative to python script location

driver = webdriver.Firefox()
driver.get("http://crispr.i2bc.paris-saclay.fr/Server/")
assert "CRISPR" in driver.title

driver.find_element_by_name("fname").send_keys(
    os.getcwd() + '/' + genome_input
)
driver.find_element_by_name("submit").click()

crispr_results = driver.find_element_by_xpath("//div[@class='content']").text

with open(crispr_results_output, 'wt') as f_handle:
    f_handle.write('\n'.join(crispr_results.split("\n")[3:]))

driver.close()

# ===========================================================================
# repo_name: befelix/scipy
# path: scipy/stats/tests/test_distributions.py
# copies: 2 | size: 117414
# ===========================================================================
""" Test functions for stats module
"""
from __future__ import division, print_function, absolute_import
import warnings
import re
import sys
import pickle
from numpy.testing import (TestCase, run_module_suite, assert_equal,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_raises, assert_warns, dec)
from nose import SkipTest
import numpy
import numpy as np
from numpy import typecodes, array
from scipy import special
import scipy.stats as stats
from scipy.stats._distn_infrastructure import argsreduce
import scipy.stats.distributions
from scipy.special import xlogy
from test_continuous_basic import distcont
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1
# Generate test cases to test cdf and distribution consistency.
# Note that this list does not include all distributions.
dists = ['uniform', 'norm', 'lognorm', 'expon', 'beta',
'powerlaw', 'bradford', 'burr', 'fisk', 'cauchy', 'halfcauchy',
'foldcauchy', 'gamma', 'gengamma', 'loggamma',
'alpha', 'anglit', 'arcsine', 'betaprime', 'dgamma',
'exponnorm', 'exponweib', 'exponpow', 'frechet_l', 'frechet_r',
'gilbrat', 'f', 'ncf', 'chi2', 'chi', 'nakagami', 'genpareto',
'genextreme', 'genhalflogistic', 'pareto', 'lomax', 'halfnorm',
'halflogistic', 'fatiguelife', 'foldnorm', 'ncx2', 't', 'nct',
'weibull_min', 'weibull_max', 'dweibull', 'maxwell', 'rayleigh',
'genlogistic', 'logistic', 'gumbel_l', 'gumbel_r', 'gompertz',
'hypsecant', 'laplace', 'reciprocal', 'trapz', 'triang', 'tukeylambda',
'vonmises', 'vonmises_line', 'pearson3', 'gennorm', 'halfgennorm',
'rice', 'kappa4', 'kappa3', 'truncnorm', 'argus']
def _assert_hasattr(a, b, msg=None):
if msg is None:
msg = '%s does not have attribute %s' % (a, b)
assert_(hasattr(a, b), msg=msg)
def test_api_regression():
# https://github.com/scipy/scipy/issues/3802
_assert_hasattr(scipy.stats.distributions, 'f_gen')
# check function for test generator
def check_distribution(dist, args, alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
if (pval < alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
assert_(pval > alpha,
msg="D = {}; pval = {}; alpha = {}; args = {}".format(
D, pval, alpha, args))
# nose test generator
def test_all_distributions():
for dist in dists:
distfunc = getattr(stats, dist)
nargs = distfunc.numargs
alpha = 0.01
if dist == 'fatiguelife':
alpha = 0.001
if dist == 'trapz':
args = tuple(np.sort(np.random.random(nargs)))
elif dist == 'triang':
args = tuple(np.random.random(nargs))
elif dist == 'reciprocal' or dist == 'truncnorm':
vals = np.random.random(nargs)
vals[1] = vals[0] + 1.0
args = tuple(vals)
elif dist == 'vonmises':
yield check_distribution, dist, (10,), alpha
yield check_distribution, dist, (101,), alpha
args = tuple(1.0 + np.random.random(nargs))
else:
args = tuple(1.0 + np.random.random(nargs))
yield check_distribution, dist, args, alpha
def check_vonmises_pdf_periodic(k, l, s, x):
vm = stats.vonmises(k, loc=l, scale=s)
assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s)))
def check_vonmises_cdf_periodic(k, l, s, x):
vm = stats.vonmises(k, loc=l, scale=s)
assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1)
def test_vonmises_pdf_periodic():
for k in [0.1, 1, 101]:
for x in [0, 1, numpy.pi, 10, 100]:
yield check_vonmises_pdf_periodic, k, 0, 1, x
yield check_vonmises_pdf_periodic, k, 1, 1, x
yield check_vonmises_pdf_periodic, k, 0, 10, x
yield check_vonmises_cdf_periodic, k, 0, 1, x
yield check_vonmises_cdf_periodic, k, 1, 1, x
yield check_vonmises_cdf_periodic, k, 0, 10, x
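# The checks above rely on the von Mises density having period
# 2*pi*scale, i.e. pdf(x) == pdf(x % (2*pi*scale)); the cdf is periodic
# only up to an integer winding number, hence the `% 1` in
# check_vonmises_cdf_periodic.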
def test_vonmises_line_support():
assert_equal(stats.vonmises_line.a, -np.pi)
assert_equal(stats.vonmises_line.b, np.pi)
def test_vonmises_numerical():
vm = stats.vonmises(800)
assert_almost_equal(vm.cdf(0), 0.5)
def test_support():
"""gh-6235"""
def check_open_support(rvs, args):
dist = getattr(stats, rvs)
assert_almost_equal(dist.pdf(dist.a, *args), 0)
assert_equal(dist.logpdf(dist.a, *args), -np.inf)
assert_almost_equal(dist.pdf(dist.b, *args), 0)
assert_equal(dist.logpdf(dist.b, *args), -np.inf)
dists = ['alpha', 'betaprime', 'burr', 'burr12',
'fatiguelife', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat',
'powerlognorm', 'rayleigh', 'wald']
dct = dict(distcont)
for dist in dists:
args = dct[dist]
yield check_open_support, dist, args
class TestRandInt(TestCase):
def test_rvs(self):
vals = stats.randint.rvs(5, 30, size=100)
assert_(numpy.all(vals < 30) & numpy.all(vals >= 5))
assert_(len(vals) == 100)
vals = stats.randint.rvs(5, 30, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.randint.rvs(15, 46)
assert_((val >= 15) & (val < 46))
assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val)))
val = stats.randint(15, 46).rvs(3)
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pdf(self):
k = numpy.r_[0:36]
out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0)
vals = stats.randint.pmf(k, 5, 30)
assert_array_almost_equal(vals, out)
def test_cdf(self):
x = numpy.r_[0:36:100j]
k = numpy.floor(x)
out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0)
vals = stats.randint.cdf(x, 5, 30)
assert_array_almost_equal(vals, out, decimal=12)
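# Illustrative sketch (hypothetical helper, not collected as a test):
# randint's pmf is uniform over the half-open integer range,
# pmf(k; low, high) = 1/(high - low) for low <= k < high, which is the
# identity TestRandInt.test_pdf exercises.
def _demo_randint_pmf(low=5, high=30):
    k = np.arange(low, high)
    assert_allclose(stats.randint.pmf(k, low, high),
                    np.full(k.shape, 1.0 / (high - low)))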
class TestBinom(TestCase):
def test_rvs(self):
vals = stats.binom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 10))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.binom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.binom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for Ticket #1842
vals1 = stats.binom.pmf(100, 100, 1)
vals2 = stats.binom.pmf(0, 100, 0)
assert_allclose(vals1, 1.0, rtol=1e-15, atol=0)
assert_allclose(vals2, 1.0, rtol=1e-15, atol=0)
def test_entropy(self):
# Basic entropy tests.
b = stats.binom(2, 0.5)
expected_p = np.array([0.25, 0.5, 0.25])
expected_h = -sum(xlogy(expected_p, expected_p))
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.binom(2, 0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.binom(2, 1.0)
h = b.entropy()
assert_equal(h, 0.0)
def test_warns_p0(self):
        # no spurious warnings are generated for p=0; gh-3817
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
assert_equal(stats.binom(n=2, p=0).mean(), 0)
assert_equal(stats.binom(n=2, p=0).std(), 0)
class TestBernoulli(TestCase):
def test_rvs(self):
vals = stats.bernoulli.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.bernoulli.rvs(0.75)
assert_(isinstance(val, int))
val = stats.bernoulli(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_entropy(self):
# Simple tests of entropy.
b = stats.bernoulli(0.25)
expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
h = b.entropy()
assert_allclose(h, expected_h)
b = stats.bernoulli(0.0)
h = b.entropy()
assert_equal(h, 0.0)
b = stats.bernoulli(1.0)
h = b.entropy()
assert_equal(h, 0.0)
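# Worked example (hypothetical helper, not collected as a test): the
# entropy checked above is H(p) = -p*log(p) - (1 - p)*log(1 - p) in
# nats, so H(0.5) = log(2) and H(0) = H(1) = 0.
def _demo_bernoulli_entropy():
    assert_allclose(stats.bernoulli(0.5).entropy(), np.log(2))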
class TestBradford(TestCase):
# gh-6216
def test_cdf_ppf(self):
c = 0.1
x = np.logspace(-20, -4)
q = stats.bradford.cdf(x, c)
xx = stats.bradford.ppf(q, c)
assert_allclose(x, xx)
class TestNBinom(TestCase):
def test_rvs(self):
vals = stats.nbinom.rvs(10, 0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.nbinom.rvs(10, 0.75)
assert_(isinstance(val, int))
val = stats.nbinom(10, 0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
# regression test for ticket 1779
assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)),
stats.nbinom.pmf(700, 721, 0.52))
# logpmf(0,1,1) shouldn't return nan (regression test for gh-4029)
val = scipy.stats.nbinom.logpmf(0, 1, 1)
assert_equal(val, 0)
class TestGeom(TestCase):
def test_rvs(self):
vals = stats.geom.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.geom.rvs(0.75)
assert_(isinstance(val, int))
val = stats.geom(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf(self):
vals = stats.geom.pmf([1, 2, 3], 0.5)
assert_array_almost_equal(vals, [0.5, 0.25, 0.125])
def test_logpmf(self):
# regression test for ticket 1793
vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5))
vals2 = stats.geom.logpmf([1, 2, 3], 0.5)
assert_allclose(vals1, vals2, rtol=1e-15, atol=0)
# regression test for gh-4028
val = stats.geom.logpmf(1, 1)
assert_equal(val, 0.0)
def test_cdf_sf(self):
vals = stats.geom.cdf([1, 2, 3], 0.5)
vals_sf = stats.geom.sf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, expected)
assert_array_almost_equal(vals_sf, 1-expected)
def test_logcdf_logsf(self):
vals = stats.geom.logcdf([1, 2, 3], 0.5)
vals_sf = stats.geom.logsf([1, 2, 3], 0.5)
expected = array([0.5, 0.75, 0.875])
assert_array_almost_equal(vals, np.log(expected))
assert_array_almost_equal(vals_sf, np.log1p(-expected))
def test_ppf(self):
vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5)
expected = array([1.0, 2.0, 3.0])
assert_array_almost_equal(vals, expected)
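# Reference identities (illustrative; hypothetical helper, not collected
# as a test) behind TestGeom: for support k = 1, 2, ...,
# pmf(k; p) = p*(1 - p)**(k - 1) and cdf(k; p) = 1 - (1 - p)**k; with
# p = 0.5 this reproduces the [0.5, 0.75, 0.875] values used above.
def _demo_geom_identities(p=0.5):
    k = np.arange(1, 4)
    assert_allclose(stats.geom.pmf(k, p), p * (1 - p)**(k - 1))
    assert_allclose(stats.geom.cdf(k, p), 1 - (1 - p)**k)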
class TestGennorm(TestCase):
def test_laplace(self):
# test against Laplace (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 1)
pdf2 = stats.laplace.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_norm(self):
# test against normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.gennorm.pdf(points, 2)
pdf2 = stats.norm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
class TestHalfgennorm(TestCase):
def test_expon(self):
# test against exponential (special case for beta=1)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 1)
pdf2 = stats.expon.pdf(points)
assert_almost_equal(pdf1, pdf2)
def test_halfnorm(self):
# test against half normal (special case for beta=2)
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, 2)
pdf2 = stats.halfnorm.pdf(points, scale=2**-.5)
assert_almost_equal(pdf1, pdf2)
def test_gennorm(self):
# test against generalized normal
points = [1, 2, 3]
pdf1 = stats.halfgennorm.pdf(points, .497324)
pdf2 = stats.gennorm.pdf(points, .497324)
assert_almost_equal(pdf1, 2*pdf2)
class TestTruncnorm(TestCase):
def test_ppf_ticket1131(self):
vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan])
assert_array_almost_equal(vals, expected)
def test_isf_ticket1131(self):
vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1.,
loc=[3]*7, scale=2)
expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan])
assert_array_almost_equal(vals, expected)
def test_gh_2477_small_values(self):
# Check a case that worked in the original issue.
low, high = -11, -10
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
# Check a case that failed in the original issue.
low, high = 10, 11
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_2477_large_values(self):
        # Check a case that fails because of extreme tail behavior.
        raise SkipTest('truncnorm rvs is known to fail at extreme tails')
low, high = 100, 101
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
def test_gh_1489_trac_962_rvs(self):
# Check the original example.
low, high = 10, 15
x = stats.truncnorm.rvs(low, high, 0, 1, size=10)
assert_(low < x.min() < x.max() < high)
class TestHypergeom(TestCase):
def test_rvs(self):
vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50))
assert_(numpy.all(vals >= 0) &
numpy.all(vals <= 3))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.hypergeom.rvs(20, 3, 10)
assert_(isinstance(val, int))
val = stats.hypergeom(20, 3, 10).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_precision(self):
# comparison number from mpmath
M = 2500
n = 50
N = 500
tot = M
good = n
hgpmf = stats.hypergeom.pmf(2, tot, good, N)
assert_almost_equal(hgpmf, 0.0010114963068932233, 11)
def test_args(self):
# test correct output for corner cases of arguments
# see gh-2325
assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11)
assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11)
assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11)
        assert_almost_equal(stats.hypergeom.pmf(1, 2, 0, 2), 0.0, 11)
def test_cdf_above_one(self):
# for some values of parameters, hypergeom cdf was >1, see gh-2238
assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0)
def test_precision2(self):
# Test hypergeom precision for large numbers. See #1218.
# Results compared with those from R.
oranges = 9.9e4
pears = 1.1e5
fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4
quantile = 2e4
res = []
for eaten in fruits_eaten:
res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges,
eaten))
expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32,
8.265601e-11, 0.1237904, 1])
assert_allclose(res, expected, atol=0, rtol=5e-7)
# Test with array_like first argument
quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4]
res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4)
expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69]
assert_allclose(res2, expected2, atol=0, rtol=5e-7)
def test_entropy(self):
# Simple tests of entropy.
hg = stats.hypergeom(4, 1, 1)
h = hg.entropy()
expected_p = np.array([0.75, 0.25])
expected_h = -np.sum(xlogy(expected_p, expected_p))
assert_allclose(h, expected_h)
hg = stats.hypergeom(1, 1, 1)
h = hg.entropy()
assert_equal(h, 0.0)
def test_logsf(self):
# Test logsf for very large numbers. See issue #4982
# Results compare with those from R (v3.2.0):
# phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE)
# -2239.771
k = 1e4
M = 1e7
n = 1e6
N = 5e4
result = stats.hypergeom.logsf(k, M, n, N)
        expected = -2239.771  # From R
        assert_almost_equal(result, expected, decimal=3)
class TestLoggamma(TestCase):
def test_stats(self):
# The following precomputed values are from the table in section 2.2
# of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
# Chan (thesis, McMaster University, 1993).
table = np.array([
# c, mean, var, skew, exc. kurt.
0.5, -1.9635, 4.9348, -1.5351, 4.0000,
1.0, -0.5772, 1.6449, -1.1395, 2.4000,
12.0, 2.4427, 0.0869, -0.2946, 0.1735,
]).reshape(-1, 5)
for c, mean, var, skew, kurt in table:
            computed = stats.loggamma.stats(c, moments='mvsk')
assert_array_almost_equal(computed, [mean, var, skew, kurt],
decimal=4)
class TestLogistic(TestCase):
# gh-6226
def test_cdf_ppf(self):
x = np.linspace(-20, 20)
y = stats.logistic.cdf(x)
xx = stats.logistic.ppf(y)
assert_allclose(x, xx)
def test_sf_isf(self):
x = np.linspace(-20, 20)
y = stats.logistic.sf(x)
xx = stats.logistic.isf(y)
assert_allclose(x, xx)
def test_extreme_values(self):
# p is chosen so that 1 - (1 - p) == p in double precision
p = 9.992007221626409e-16
desired = 34.53957599234088
assert_allclose(stats.logistic.ppf(1 - p), desired)
assert_allclose(stats.logistic.isf(p), desired)
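# Worked value (illustrative): logistic has isf(p) = log((1 - p)/p), so
# for p ~ 9.992e-16 this is approximately -log(p) ~ 34.5396, the
# `desired` value in test_extreme_values above.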
class TestLogser(TestCase):
def test_rvs(self):
vals = stats.logser.rvs(0.75, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.logser.rvs(0.75)
assert_(isinstance(val, int))
val = stats.logser(0.75).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_pmf_small_p(self):
m = stats.logser.pmf(4, 1e-20)
# The expected value was computed using mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 64
# >>> k = 4
# >>> p = mpmath.mpf('1e-20')
# >>> float(-(p**k)/k/mpmath.log(1-p))
# 2.5e-61
# It is also clear from noticing that for very small p,
# log(1-p) is approximately -p, and the formula becomes
# p**(k-1) / k
assert_allclose(m, 2.5e-61)
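    # Worked check of the approximation above: with p = 1e-20 and k = 4,
    # p**(k - 1)/k = (1e-20)**3 / 4 = 2.5e-61, the mpmath value asserted.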
def test_mean_small_p(self):
m = stats.logser.mean(1e-8)
# The expected mean was computed using mpmath:
# >>> import mpmath
        # >>> mpmath.mp.dps = 60
# >>> p = mpmath.mpf('1e-8')
# >>> float(-p / ((1 - p)*mpmath.log(1 - p)))
# 1.000000005
assert_allclose(m, 1.000000005)
class TestPareto(TestCase):
def test_stats(self):
# Check the stats() method with some simple values. Also check
# that the calculations do not trigger RuntimeWarnings.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
m, v, s, k = stats.pareto.stats(0.5, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.0, moments='mvsk')
assert_equal(m, np.inf)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(1.5, moments='mvsk')
assert_equal(m, 3.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.0, moments='mvsk')
assert_equal(m, 2.0)
assert_equal(v, np.inf)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(2.5, moments='mvsk')
assert_allclose(m, 2.5 / 1.5)
assert_allclose(v, 2.5 / (1.5*1.5*0.5))
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.0, moments='mvsk')
assert_allclose(m, 1.5)
assert_allclose(v, 0.75)
assert_equal(s, np.nan)
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(3.5, moments='mvsk')
assert_allclose(m, 3.5 / 2.5)
assert_allclose(v, 3.5 / (2.5*2.5*1.5))
assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.0, moments='mvsk')
assert_allclose(m, 4.0 / 3.0)
assert_allclose(v, 4.0 / 18.0)
assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0))
assert_equal(k, np.nan)
m, v, s, k = stats.pareto.stats(4.5, moments='mvsk')
assert_allclose(m, 4.5 / 3.5)
assert_allclose(v, 4.5 / (3.5*3.5*2.5))
assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5))
assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5))
def test_sf(self):
x = 1e9
b = 2
scale = 1.5
p = stats.pareto.sf(x, b, loc=0, scale=scale)
expected = (scale/x)**b # 2.25e-18
assert_allclose(p, expected)
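# Reference formulas (illustrative; hypothetical helper, not collected
# as a test) behind TestPareto.test_stats: for shape b, the mean is
# b/(b - 1) for b > 1 and the variance is b/((b - 1)**2 * (b - 2)) for
# b > 2; smaller b gives the np.inf values asserted above.
def _demo_pareto_moments(b=3.0):
    m, v = stats.pareto.stats(b, moments='mv')
    assert_allclose(m, b / (b - 1))                 # 1.5 for b = 3
    assert_allclose(v, b / ((b - 1)**2 * (b - 2)))  # 0.75 for b = 3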
class TestGenpareto(TestCase):
def test_ab(self):
# c >= 0: a, b = [0, inf]
for c in [1., 0.]:
c = np.asarray(c)
stats.genpareto._argcheck(c) # ugh
assert_equal(stats.genpareto.a, 0.)
assert_(np.isposinf(stats.genpareto.b))
# c < 0: a=0, b=1/|c|
c = np.asarray(-2.)
stats.genpareto._argcheck(c)
assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5])
def test_c0(self):
# with c=0, genpareto reduces to the exponential distribution
rv = stats.genpareto(c=0.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.expon.pdf(x))
assert_allclose(rv.cdf(x), stats.expon.cdf(x))
assert_allclose(rv.sf(x), stats.expon.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.expon.ppf(q))
def test_cm1(self):
# with c=-1, genpareto reduces to the uniform distr on [0, 1]
rv = stats.genpareto(c=-1.)
x = np.linspace(0, 10., 30)
assert_allclose(rv.pdf(x), stats.uniform.pdf(x))
assert_allclose(rv.cdf(x), stats.uniform.cdf(x))
assert_allclose(rv.sf(x), stats.uniform.sf(x))
q = np.linspace(0., 1., 10)
assert_allclose(rv.ppf(q), stats.uniform.ppf(q))
# logpdf(1., c=-1) should be zero
assert_allclose(rv.logpdf(1), 0)
def test_x_inf(self):
# make sure x=inf is handled gracefully
rv = stats.genpareto(c=0.1)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=0.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
rv = stats.genpareto(c=-1.)
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.])
assert_(np.isneginf(rv.logpdf(np.inf)))
def test_c_continuity(self):
# pdf is continuous at c=0, -1
x = np.linspace(0, 10, 30)
for c in [0, -1]:
pdf0 = stats.genpareto.pdf(x, c)
for dc in [1e-14, -1e-14]:
pdfc = stats.genpareto.pdf(x, c + dc)
assert_allclose(pdf0, pdfc, atol=1e-12)
cdf0 = stats.genpareto.cdf(x, c)
            for dc in [1e-14, -1e-14]:
cdfc = stats.genpareto.cdf(x, c + dc)
assert_allclose(cdf0, cdfc, atol=1e-12)
def test_c_continuity_ppf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
ppf0 = stats.genpareto.ppf(q, c)
for dc in [1e-14, -1e-14]:
ppfc = stats.genpareto.ppf(q, c + dc)
assert_allclose(ppf0, ppfc, atol=1e-12)
def test_c_continuity_isf(self):
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [0., -1.]:
isf0 = stats.genpareto.isf(q, c)
for dc in [1e-14, -1e-14]:
isfc = stats.genpareto.isf(q, c + dc)
assert_allclose(isf0, isfc, atol=1e-12)
def test_cdf_ppf_roundtrip(self):
# this should pass with machine precision. hat tip @pbrod
q = np.r_[np.logspace(1e-12, 0.01, base=0.1),
np.linspace(0.01, 1, 30, endpoint=False),
1. - np.logspace(1e-12, 0.01, base=0.1)]
for c in [1e-8, -1e-18, 1e-15, -1e-15]:
assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c),
q, atol=1e-15)
def test_logsf(self):
logp = stats.genpareto.logsf(1e10, .01, 0, 1)
assert_allclose(logp, -1842.0680753952365)
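# Worked value (illustrative) for test_logsf above: genpareto has
# sf(x; c) = (1 + c*x)**(-1/c), so logsf(1e10, 0.01)
# = -100*log(1 + 1e8) ~ -1842.068, matching the asserted constant.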
class TestPearson3(TestCase):
def test_rvs(self):
vals = stats.pearson3.rvs(0.1, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllFloat'])
val = stats.pearson3.rvs(0.5)
assert_(isinstance(val, float))
val = stats.pearson3(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllFloat'])
assert_(len(val) == 3)
def test_pdf(self):
vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]),
atol=1e-6)
vals = stats.pearson3.pdf(-3, 0.1)
assert_allclose(vals, np.array([0.00313791]), atol=1e-6)
vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092,
0.39885918, 0.23413173]), atol=1e-6)
def test_cdf(self):
vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2])
assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]),
atol=1e-6)
vals = stats.pearson3.cdf(-3, 0.1)
assert_allclose(vals, [0.00082256], atol=1e-6)
vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1)
assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01,
5.06649130e-01, 8.41442111e-01], atol=1e-6)
class TestKappa4(TestCase):
def test_cdf_genpareto(self):
# h = 1 and k != 0 is generalized Pareto
x = [0.0, 0.1, 0.2, 0.5]
h = 1.0
for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0,
1.9]:
vals = stats.kappa4.cdf(x, h, k)
            # shape parameter is the opposite of what is expected
vals_comp = stats.genpareto.cdf(x, -k)
assert_allclose(vals, vals_comp)
def test_cdf_genextreme(self):
# h = 0 and k != 0 is generalized extreme value
x = np.linspace(-5, 5, 10)
h = 0.0
k = np.linspace(-3, 3, 10)
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.genextreme.cdf(x, k)
assert_allclose(vals, vals_comp)
def test_cdf_expon(self):
# h = 1 and k = 0 is exponential
x = np.linspace(0, 10, 10)
h = 1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.expon.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_gumbel_r(self):
# h = 0 and k = 0 is gumbel_r
x = np.linspace(-5, 5, 10)
h = 0.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.gumbel_r.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_logistic(self):
# h = -1 and k = 0 is logistic
x = np.linspace(-5, 5, 10)
h = -1.0
k = 0.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.logistic.cdf(x)
assert_allclose(vals, vals_comp)
def test_cdf_uniform(self):
# h = 1 and k = 1 is uniform
x = np.linspace(-5, 5, 10)
h = 1.0
k = 1.0
vals = stats.kappa4.cdf(x, h, k)
vals_comp = stats.uniform.cdf(x)
assert_allclose(vals, vals_comp)
def test_integers_ctor(self):
# regression test for gh-7416: _argcheck fails for integer h and k
# in numpy 1.12
stats.kappa4(1, 2)
class TestPoisson(TestCase):
def test_pmf_basic(self):
# Basic case
ln2 = np.log(2)
vals = stats.poisson.pmf([0, 1, 2], ln2)
expected = [0.5, ln2/2, ln2**2/4]
assert_allclose(vals, expected)
def test_mu0(self):
# Edge case: mu=0
vals = stats.poisson.pmf([0, 1, 2], 0)
expected = [1, 0, 0]
assert_array_equal(vals, expected)
interval = stats.poisson.interval(0.95, 0)
assert_equal(interval, (0, 0))
def test_rvs(self):
vals = stats.poisson.rvs(0.5, size=(2, 50))
assert_(numpy.all(vals >= 0))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.poisson.rvs(0.5)
assert_(isinstance(val, int))
val = stats.poisson(0.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_stats(self):
mu = 16.0
result = stats.poisson.stats(mu, moments='mvsk')
assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu])
mu = np.array([0.0, 1.0, 2.0])
result = stats.poisson.stats(mu, moments='mvsk')
expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5])
assert_allclose(result, expected)
class TestZipf(TestCase):
def test_rvs(self):
vals = stats.zipf.rvs(1.5, size=(2, 50))
assert_(numpy.all(vals >= 1))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.zipf.rvs(1.5)
assert_(isinstance(val, int))
val = stats.zipf(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
def test_moments(self):
# n-th moment is finite iff a > n + 1
m, v = stats.zipf.stats(a=2.8)
assert_(np.isfinite(m))
assert_equal(v, np.inf)
s, k = stats.zipf.stats(a=4.8, moments='sk')
assert_(not np.isfinite([s, k]).all())
class TestDLaplace(TestCase):
def test_rvs(self):
vals = stats.dlaplace.rvs(1.5, size=(2, 50))
assert_(numpy.shape(vals) == (2, 50))
assert_(vals.dtype.char in typecodes['AllInteger'])
val = stats.dlaplace.rvs(1.5)
assert_(isinstance(val, int))
val = stats.dlaplace(1.5).rvs(3)
assert_(isinstance(val, numpy.ndarray))
assert_(val.dtype.char in typecodes['AllInteger'])
assert_(stats.dlaplace.rvs(0.8) is not None)
def test_stats(self):
# compare the explicit formulas w/ direct summation using pmf
a = 1.
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
N = 37
xx = np.arange(-N, N+1)
pp = dl.pmf(xx)
m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4)
assert_equal((m, s), (0, 0))
assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8)
def test_stats2(self):
a = np.log(2.)
dl = stats.dlaplace(a)
m, v, s, k = dl.stats('mvsk')
assert_equal((m, s), (0., 0.))
assert_allclose((v, k), (4., 3.25))
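# Reference (illustrative): dlaplace(a) has pmf proportional to
# exp(-a*|k|); writing q = exp(-a), the variance is 2*q/(1 - q)**2, so
# a = log(2) (q = 1/2) gives 4, the value asserted in test_stats2.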
class TestInvGamma(TestCase):
def test_invgamma_inf_gh_1866(self):
# invgamma's moments are only finite for a>n
# specific numbers checked w/ boost 1.54
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
mvsk = stats.invgamma.stats(a=19.31, moments='mvsk')
expected = [0.05461496450, 0.0001723162534, 1.020362676,
2.055616582]
assert_allclose(mvsk, expected)
a = [1.1, 3.1, 5.6]
mvsk = stats.invgamma.stats(a=a, moments='mvsk')
expected = ([10., 0.476190476, 0.2173913043], # mmm
[np.inf, 0.2061430632, 0.01312749422], # vvv
[np.nan, 41.95235392, 2.919025532], # sss
[np.nan, np.nan, 24.51923076]) # kkk
for x, y in zip(mvsk, expected):
assert_almost_equal(x, y)
def test_cdf_ppf(self):
# gh-6245
x = np.logspace(-2.6, 0)
y = stats.invgamma.cdf(x, 1)
xx = stats.invgamma.ppf(y, 1)
assert_allclose(x, xx)
def test_sf_isf(self):
# gh-6245
if sys.maxsize > 2**32:
x = np.logspace(2, 100)
else:
            # Invgamma roundtrip on 32-bit systems has relative accuracy
            # ~1e-15 until x=1e+15, and becomes inf above x=1e+18
x = np.logspace(2, 18)
y = stats.invgamma.sf(x, 1)
xx = stats.invgamma.isf(y, 1)
assert_allclose(x, xx, rtol=1.0)
class TestF(TestCase):
def test_f_moments(self):
# n-th moment of F distributions is only finite for n < dfd / 2
m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk')
assert_(np.isfinite(m))
assert_(np.isfinite(v))
assert_(np.isfinite(s))
assert_(not np.isfinite(k))
def test_moments_warnings(self):
# no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero)
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk')
@dec.knownfailureif(True, 'f stats does not properly broadcast')
def test_stats_broadcast(self):
# stats do not fully broadcast just yet
mv = stats.f.stats(dfn=11, dfd=[11, 12])
def test_rvgeneric_std():
# Regression test for #1191
assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487])
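# Worked values (illustrative): Student's t has var = df/(df - 2) for
# df > 2, so std([5, 6]) = [sqrt(5/3), sqrt(3/2)] ~ [1.29099, 1.22474].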
class TestRvDiscrete(TestCase):
def test_rvs(self):
states = [-1, 0, 1, 2, 3, 4]
probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0]
samples = 1000
r = stats.rv_discrete(name='sample', values=(states, probability))
x = r.rvs(size=samples)
assert_(isinstance(x, numpy.ndarray))
for s, p in zip(states, probability):
assert_(abs(sum(x == s)/float(samples) - p) < 0.05)
x = r.rvs()
assert_(isinstance(x, int))
def test_entropy(self):
# Basic tests of entropy.
pvals = np.array([0.25, 0.45, 0.3])
p = stats.rv_discrete(values=([0, 1, 2], pvals))
expected_h = -sum(xlogy(pvals, pvals))
h = p.entropy()
assert_allclose(h, expected_h)
p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0]))
h = p.entropy()
assert_equal(h, 0.0)
def test_pmf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x = [[1., 4.],
[3., 2]]
assert_allclose(rv.pmf(x),
[[0.5, 0.2],
[0., 0.3]], atol=1e-14)
def test_cdf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5]
expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1]
assert_allclose(rv.cdf(x_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.cdf(xx) for xx in x_values],
expected, atol=1e-14)
def test_ppf(self):
xk = [1, 2, 4]
pk = [0.5, 0.3, 0.2]
rv = stats.rv_discrete(values=(xk, pk))
q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.]
expected = [1, 1, 2, 2, 4, 4]
assert_allclose(rv.ppf(q_values), expected, atol=1e-14)
# also check scalar arguments
assert_allclose([rv.ppf(q) for q in q_values],
expected, atol=1e-14)
def test_cdf_ppf_next(self):
# copied and special cased from test_discrete_basic
vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1])
rv = stats.rv_discrete(values=vals)
assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8),
rv.xk[1:])
def test_expect(self):
xk = [1, 2, 4, 6, 7, 11]
pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1]
rv = stats.rv_discrete(values=(xk, pk))
assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14)
def test_bad_input(self):
xk = [1, 2, 3]
pk = [0.5, 0.5]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
pk = [1, 2, 3]
assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk)))
class TestSkewNorm(TestCase):
def test_normal(self):
# When the skewness is 0 the distribution is normal
x = np.linspace(-5, 5, 100)
assert_array_almost_equal(stats.skewnorm.pdf(x, a=0),
stats.norm.pdf(x))
def test_rvs(self):
shape = (3, 4, 5)
x = stats.skewnorm.rvs(a=0.75, size=shape)
assert_equal(shape, x.shape)
x = stats.skewnorm.rvs(a=-3, size=shape)
assert_equal(shape, x.shape)
def test_moments(self):
X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2)
assert_array_almost_equal([np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)],
stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk'),
decimal=2)
X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2)
assert_array_almost_equal([np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)],
stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk'),
decimal=2)
class TestExpon(TestCase):
def test_zero(self):
assert_equal(stats.expon.pdf(0), 1)
def test_tail(self): # Regression test for ticket 807
assert_equal(stats.expon.cdf(1e-18), 1e-18)
assert_equal(stats.expon.isf(stats.expon.sf(40)), 40)
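# Note (illustrative): expon has cdf(x) = 1 - exp(-x), which is ~x for
# tiny x; the exact cdf(1e-18) == 1e-18 check above implies the cdf is
# evaluated in the numerically stable -expm1(-x) form.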
class TestExponNorm(TestCase):
def test_moments(self):
# Some moment test cases based on non-loc/scaled formula
def get_moms(lam, sig, mu):
# See wikipedia for these formulae
            # where it is listed as the exponentially modified Gaussian
opK2 = 1.0 + 1 / (lam*sig)**2
exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5)
exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2)
return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt]
mu, sig, lam = 0, 1, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -3, 2, 0.1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = 0, 3, 1
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
mu, sig, lam = -5, 11, 3.5
K = 1.0 / (lam * sig)
sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk')
assert_almost_equal(sts, get_moms(lam, sig, mu))
def test_extremes_x(self):
# Test for extreme values against overflows
assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0)
assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0)
class TestGenExpon(TestCase):
def test_pdf_unity_area(self):
from scipy.integrate import simps
# PDF should integrate to one
p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_almost_equal(simps(p, dx=0.01), 1, 1)
def test_cdf_bounds(self):
# CDF should always be positive
cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0)
assert_(numpy.all((0 <= cdf) & (cdf <= 1)))
class TestExponpow(TestCase):
def test_tail(self):
assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20)
assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8),
5)
class TestSkellam(TestCase):
def test_pmf(self):
# comparison to R
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skpmfR = numpy.array(
[4.2254582961926893e-005, 1.1404838449648488e-004,
2.8979625801752660e-004, 6.9177078182101231e-004,
1.5480716105844708e-003, 3.2412274963433889e-003,
6.3373707175123292e-003, 1.1552351566696643e-002,
1.9606152375042644e-002, 3.0947164083410337e-002,
4.5401737566767360e-002, 6.1894328166820688e-002,
7.8424609500170578e-002, 9.2418812533573133e-002,
1.0139793148019728e-001, 1.0371927988298846e-001,
9.9076583077406091e-002, 8.8546660073089561e-002,
7.4187842052486810e-002, 5.8392772862200251e-002,
4.3268692953013159e-002, 3.0248159818374226e-002,
1.9991434305603021e-002, 1.2516877303301180e-002,
7.4389876226229707e-003])
assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
def test_cdf(self):
# comparison to R, only 5 decimals
k = numpy.arange(-10, 15)
mu1, mu2 = 10, 5
skcdfR = numpy.array(
[6.4061475386192104e-005, 1.7810985988267694e-004,
4.6790611790020336e-004, 1.1596768997212152e-003,
2.7077485103056847e-003, 5.9489760066490718e-003,
1.2286346724161398e-002, 2.3838698290858034e-002,
4.3444850665900668e-002, 7.4392014749310995e-002,
1.1979375231607835e-001, 1.8168808048289900e-001,
2.6011268998306952e-001, 3.5253150251664261e-001,
4.5392943399683988e-001, 5.5764871387982828e-001,
6.5672529695723436e-001, 7.4527195703032389e-001,
8.1945979908281064e-001, 8.7785257194501087e-001,
9.2112126489802404e-001, 9.5136942471639818e-001,
9.7136085902200120e-001, 9.8387773632530240e-001,
9.9131672394792536e-001])
assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
class TestLognorm(TestCase):
def test_pdf(self):
# Regression test for Ticket #1471: avoid nan with 0/0 situation
# Also make sure there are no warnings at x=0, cf gh-5202
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
pdf = stats.lognorm.pdf([0, 0.5, 1], 1)
assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228])
def test_logcdf(self):
# Regression test for gh-5940: sf et al would underflow too early
x2, mu, sigma = 201.68, 195, 0.149
assert_allclose(stats.lognorm.sf(x2-mu, s=sigma),
stats.norm.sf(np.log(x2-mu)/sigma))
assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma),
stats.norm.logsf(np.log(x2-mu)/sigma))
class TestBeta(TestCase):
def test_logpdf(self):
# Regression test for Ticket #1326: avoid nan with 0*log(0) situation
logpdf = stats.beta.logpdf(0, 1, 0.5)
assert_almost_equal(logpdf, -0.69314718056)
logpdf = stats.beta.logpdf(0, 0.5, 1)
assert_almost_equal(logpdf, np.inf)
def test_logpdf_ticket_1866(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.beta(alpha, beta)
assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
class TestBetaPrime(TestCase):
def test_logpdf(self):
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
b = stats.betaprime(alpha, beta)
assert_(np.isfinite(b.logpdf(x)).all())
assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
def test_cdf(self):
# regression test for gh-4030: Implementation of
# scipy.stats.betaprime.cdf()
x = stats.betaprime.cdf(0, 0.2, 0.3)
assert_equal(x, 0.0)
alpha, beta = 267, 1472
x = np.array([0.2, 0.5, 0.6])
cdfs = stats.betaprime.cdf(x, alpha, beta)
assert_(np.isfinite(cdfs).all())
# check the new cdf implementation vs generic one:
gen_cdf = stats.rv_continuous._cdf_single
cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x]
assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12)
class TestGamma(TestCase):
def test_pdf(self):
# a few test cases to compare with R
pdf = stats.gamma.pdf(90, 394, scale=1./5)
assert_almost_equal(pdf, 0.002312341)
pdf = stats.gamma.pdf(3, 10, scale=1./5)
assert_almost_equal(pdf, 0.1620358)
def test_logpdf(self):
# Regression test for Ticket #1326: cornercase avoid nan with 0*log(0)
# situation
logpdf = stats.gamma.logpdf(0, 1)
assert_almost_equal(logpdf, 0)
class TestChi2(TestCase):
    # Regression tests after precision improvements (ticket 1041);
    # values not independently verified.
def test_precision(self):
assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003,
decimal=14)
assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778,
decimal=14)
class TestGumbelL(TestCase):
# gh-6228
def test_cdf_ppf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.cdf(x)
xx = stats.gumbel_l.ppf(y)
assert_allclose(x, xx)
def test_logcdf_logsf(self):
x = np.linspace(-100, -4)
y = stats.gumbel_l.logcdf(x)
z = stats.gumbel_l.logsf(x)
u = np.exp(y)
v = -special.expm1(z)
assert_allclose(u, v)
def test_sf_isf(self):
x = np.linspace(-20, 5)
y = stats.gumbel_l.sf(x)
xx = stats.gumbel_l.isf(y)
assert_allclose(x, xx)
class TestArrayArgument(TestCase): # test for ticket:992
def test_noexception(self):
rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5),
size=(10, 5))
assert_equal(rvs.shape, (10, 5))
class TestDocstring(TestCase):
def test_docstrings(self):
# See ticket #761
if stats.rayleigh.__doc__ is not None:
self.assertTrue("rayleigh" in stats.rayleigh.__doc__.lower())
if stats.bernoulli.__doc__ is not None:
self.assertTrue("bernoulli" in stats.bernoulli.__doc__.lower())
def test_no_name_arg(self):
# If name is not given, construction shouldn't fail. See #1508.
stats.rv_continuous()
stats.rv_discrete()
class TestEntropy(TestCase):
def test_entropy_positive(self):
# See ticket #497
pk = [0.5, 0.2, 0.3]
qk = [0.1, 0.25, 0.65]
eself = stats.entropy(pk, pk)
edouble = stats.entropy(pk, qk)
assert_(0.0 == eself)
assert_(edouble >= 0.0)
def test_entropy_base(self):
pk = np.ones(16, float)
S = stats.entropy(pk, base=2.)
assert_(abs(S - 4.) < 1.e-5)
qk = np.ones(16, float)
qk[:8] = 2.
S = stats.entropy(pk, qk)
S2 = stats.entropy(pk, qk, base=2.)
assert_(abs(S/S2 - np.log(2.)) < 1.e-5)
def test_entropy_zero(self):
# Test for PR-479
assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278,
decimal=12)
def test_entropy_2d(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[0.1933259, 0.18609809])
def test_entropy_2d_zero(self):
pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]]
qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]]
assert_array_almost_equal(stats.entropy(pk, qk),
[np.inf, 0.18609809])
pk[0][0] = 0.0
assert_array_almost_equal(stats.entropy(pk, qk),
[0.17403988, 0.18609809])
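# Reference (illustrative; hypothetical helper, not collected as a
# test): with two arguments stats.entropy(pk, qk) computes the
# Kullback-Leibler divergence sum(pk * log(pk / qk)), which is 0 iff
# pk == qk and +inf where qk is 0 but pk is not, as the tests above
# exercise.
def _demo_kl_divergence():
    pk = np.array([0.5, 0.2, 0.3])
    qk = np.array([0.1, 0.25, 0.65])
    assert_allclose(stats.entropy(pk, qk), np.sum(pk * np.log(pk / qk)))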
def test_argsreduce():
a = array([1, 3, 2, 1, 2, 3, 3])
b, c = argsreduce(a > 1, a, 2)
assert_array_equal(b, [3, 2, 2, 3, 3])
assert_array_equal(c, [2, 2, 2, 2, 2])
b, c = argsreduce(2 > 1, a, 2)
assert_array_equal(b, a[0])
assert_array_equal(c, [2])
b, c = argsreduce(a > 0, a, 2)
assert_array_equal(b, a)
assert_array_equal(c, [2] * numpy.size(a))
class TestFitMethod(object):
skip = ['ncf']
@dec.slow
def test_fit(self):
def check(func, dist, args, alpha):
if dist in self.skip:
raise SkipTest("%s fit known to fail" % dist)
distfunc = getattr(stats, dist)
with np.errstate(all='ignore'):
res = distfunc.rvs(*args, **{'size': 200})
vals = distfunc.fit(res)
vals2 = distfunc.fit(res, optimizer='powell')
# Only check the length of the return
# FIXME: should check the actual results to see if we are 'close'
# to what was created --- but what is 'close' enough
if dist == 'frechet':
assert_(len(vals) == len(args))
assert_(len(vals2) == len(args))
else:
assert_(len(vals) == 2+len(args))
assert_(len(vals2) == 2+len(args))
for func, dist, args, alpha in test_all_distributions():
yield check, func, dist, args, alpha
@dec.slow
def test_fix_fit(self):
def check(func, dist, args, alpha):
            # Not sure why 'ncf' and 'beta' are failing
# frechet has different len(args) than distfunc.numargs
if dist in self.skip + ['frechet']:
raise SkipTest("%s fit known to fail" % dist)
distfunc = getattr(stats, dist)
with np.errstate(all='ignore'):
res = distfunc.rvs(*args, **{'size': 200})
vals = distfunc.fit(res, floc=0)
vals2 = distfunc.fit(res, fscale=1)
assert_(len(vals) == 2+len(args))
assert_(vals[-2] == 0)
assert_(vals2[-1] == 1)
assert_(len(vals2) == 2+len(args))
if len(args) > 0:
vals3 = distfunc.fit(res, f0=args[0])
assert_(len(vals3) == 2+len(args))
assert_(vals3[0] == args[0])
if len(args) > 1:
vals4 = distfunc.fit(res, f1=args[1])
assert_(len(vals4) == 2+len(args))
assert_(vals4[1] == args[1])
if len(args) > 2:
vals5 = distfunc.fit(res, f2=args[2])
assert_(len(vals5) == 2+len(args))
assert_(vals5[2] == args[2])
for func, dist, args, alpha in test_all_distributions():
yield check, func, dist, args, alpha
def test_fix_fit_2args_lognorm(self):
# Regression test for #1551.
np.random.seed(12345)
with np.errstate(all='ignore'):
x = stats.lognorm.rvs(0.25, 0., 20.0, size=20)
assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)),
[0.25888672, 0, 20], atol=1e-5)
def test_fix_fit_norm(self):
x = np.arange(1, 6)
loc, scale = stats.norm.fit(x)
assert_almost_equal(loc, 3)
assert_almost_equal(scale, np.sqrt(2))
loc, scale = stats.norm.fit(x, floc=2)
assert_equal(loc, 2)
assert_equal(scale, np.sqrt(3))
loc, scale = stats.norm.fit(x, fscale=2)
assert_almost_equal(loc, 3)
assert_equal(scale, 2)
def test_fix_fit_gamma(self):
x = np.arange(1, 6)
meanlog = np.log(x).mean()
# A basic test of gamma.fit with floc=0.
floc = 0
a, loc, scale = stats.gamma.fit(x, floc=floc)
s = np.log(x.mean()) - meanlog
assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# Regression tests for gh-2514.
# The problem was that if `floc=0` was given, any other fixed
# parameters were ignored.
f0 = 1
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
f0 = 2
floc = 0
a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc)
assert_equal(a, f0)
assert_equal(loc, floc)
assert_almost_equal(scale, x.mean()/a, decimal=8)
# loc and scale fixed.
floc = 0
fscale = 2
a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale)
assert_equal(loc, floc)
assert_equal(scale, fscale)
c = meanlog - np.log(fscale)
assert_almost_equal(special.digamma(a), c)
def test_fix_fit_beta(self):
# Test beta.fit when both floc and fscale are given.
def mlefunc(a, b, x):
# Zeros of this function are critical points of
# the maximum likelihood function.
n = len(x)
s1 = np.log(x).sum()
s2 = np.log(1-x).sum()
psiab = special.psi(a + b)
func = [s1 - n * (-psiab + special.psi(a)),
s2 - n * (-psiab + special.psi(b))]
return func
# Basic test with floc and fscale given.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
assert_equal(loc, 0)
assert_equal(scale, 1)
assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6)
# Basic test with f0, floc and fscale given.
# This is also a regression test for gh-2514.
x = np.array([0.125, 0.25, 0.5])
a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1)
assert_equal(a, 2)
assert_equal(loc, 0)
assert_equal(scale, 1)
da, db = mlefunc(a, b, x)
assert_allclose(db, 0, atol=1e-5)
# Same floc and fscale values as above, but reverse the data
# and fix b (f1).
x2 = 1 - x
a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1)
assert_equal(b2, 2)
assert_equal(loc2, 0)
assert_equal(scale2, 1)
da, db = mlefunc(a2, b2, x2)
assert_allclose(da, 0, atol=1e-5)
# a2 of this test should equal b from above.
assert_almost_equal(a2, b)
# Check for detection of data out of bounds when floc and fscale
# are given.
assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1)
y = np.array([0, .5, 1])
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2)
assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2)
# Check that attempting to fix all the parameters raises a ValueError.
assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1,
floc=2, fscale=3)
def test_fshapes(self):
# take a beta distribution, with shapes='a, b', and make sure that
# fa is equivalent to f0, and fb is equivalent to f1
a, b = 3., 4.
x = stats.beta.rvs(a, b, size=100, random_state=1234)
res_1 = stats.beta.fit(x, f0=3.)
res_2 = stats.beta.fit(x, fa=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_2 = stats.beta.fit(x, fix_a=3.)
assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12)
res_3 = stats.beta.fit(x, f1=4.)
res_4 = stats.beta.fit(x, fb=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
res_4 = stats.beta.fit(x, fix_b=4.)
assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12)
# cannot specify both positional and named args at the same time
assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2)
# check that attempting to fix all parameters raises a ValueError
assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1,
floc=2, fscale=3)
# check that specifying floc, fscale and fshapes works for
# beta and gamma which override the generic fit method
res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1)
aa, bb, ll, ss = res_5
assert_equal([aa, ll, ss], [3., 0, 1])
# gamma distribution
a = 3.
data = stats.gamma.rvs(a, size=100)
aa, ll, ss = stats.gamma.fit(data, fa=a)
assert_equal(aa, a)
def test_extra_params(self):
# unknown parameters should raise rather than be silently ignored
dist = stats.exponnorm
data = dist.rvs(K=2, size=100)
dct = dict(enikibeniki=-101)
assert_raises(TypeError, dist.fit, data, **dct)
class TestFrozen(TestCase):
# Test that a frozen distribution gives the same results as the original
# object.
#
# Only tested for the normal distribution (with loc and scale specified)
# and for the gamma distribution (with a shape parameter specified).
def test_norm(self):
dist = stats.norm
frozen = stats.norm(loc=10.0, scale=3.0)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(loc=10.0, scale=3.0)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, loc=10.0, scale=3.0)
assert_equal(result_f, result)
assert_equal(frozen.a, dist.a)
assert_equal(frozen.b, dist.b)
def test_gamma(self):
a = 2.0
dist = stats.gamma
frozen = stats.gamma(a)
result_f = frozen.pdf(20.0)
result = dist.pdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.cdf(20.0)
result = dist.cdf(20.0, a)
assert_equal(result_f, result)
result_f = frozen.ppf(0.25)
result = dist.ppf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.isf(0.25)
result = dist.isf(0.25, a)
assert_equal(result_f, result)
result_f = frozen.sf(10.0)
result = dist.sf(10.0, a)
assert_equal(result_f, result)
result_f = frozen.median()
result = dist.median(a)
assert_equal(result_f, result)
result_f = frozen.mean()
result = dist.mean(a)
assert_equal(result_f, result)
result_f = frozen.var()
result = dist.var(a)
assert_equal(result_f, result)
result_f = frozen.std()
result = dist.std(a)
assert_equal(result_f, result)
result_f = frozen.entropy()
result = dist.entropy(a)
assert_equal(result_f, result)
result_f = frozen.moment(2)
result = dist.moment(2, a)
assert_equal(result_f, result)
assert_equal(frozen.a, frozen.dist.a)
assert_equal(frozen.b, frozen.dist.b)
def test_regression_ticket_1293(self):
# Create a frozen distribution.
frozen = stats.lognorm(1)
# Call one of its methods that does not take any keyword arguments.
m1 = frozen.moment(2)
# Now call a method that takes a keyword argument.
frozen.stats(moments='mvsk')
# Call moment(2) again.
# After calling stats(), the following was raising an exception.
# So this test passes if the following does not raise an exception.
m2 = frozen.moment(2)
# The following should also be true, of course. But it is not
# the focus of this test.
assert_equal(m1, m2)
def test_ab(self):
# test that the support of a frozen distribution
# (i) remains frozen even if it changes for the original one
# (ii) is actually correct if the shape parameters are such that
# the values of [a, b] are not the default [0, inf]
# take a genpareto as an example where the support
# depends on the value of the shape parameter:
# for c > 0: a, b = 0, inf
# for c < 0: a, b = 0, -1/c
rv = stats.genpareto(c=-0.1)
a, b = rv.dist.a, rv.dist.b
assert_equal([a, b], [0., 10.])
assert_equal([rv.a, rv.b], [0., 10.])
stats.genpareto.pdf(0, c=0.1) # this changes genpareto.b
assert_equal([rv.dist.a, rv.dist.b], [a, b])
assert_equal([rv.a, rv.b], [a, b])
rv1 = stats.genpareto(c=0.1)
assert_(rv1.dist is not rv.dist)
def test_rv_frozen_in_namespace(self):
# Regression test for gh-3522
assert_(hasattr(stats.distributions, 'rv_frozen'))
def test_random_state(self):
# only check that the random_state attribute exists,
frozen = stats.norm()
assert_(hasattr(frozen, 'random_state'))
# ... that it can be set,
frozen.random_state = 42
assert_equal(frozen.random_state.get_state(),
np.random.RandomState(42).get_state())
# ... and that .rvs method accepts it as an argument
rndm = np.random.RandomState(1234)
frozen.rvs(size=8, random_state=rndm)
def test_pickling(self):
# test that a frozen instance pickles and unpickles
# (this method is a clone of common_tests.check_pickling)
beta = stats.beta(2.3098496451481823, 0.62687954300963677)
poiss = stats.poisson(3.)
sample = stats.rv_discrete(values=([0, 1, 2, 3],
[0.1, 0.2, 0.3, 0.4]))
for distfn in [beta, poiss, sample]:
distfn.random_state = 1234
distfn.rvs(size=8)
s = pickle.dumps(distfn)
r0 = distfn.rvs(size=8)
unpickled = pickle.loads(s)
r1 = unpickled.rvs(size=8)
assert_equal(r0, r1)
# also smoke test some methods
medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
assert_equal(medians[0], medians[1])
assert_equal(distfn.cdf(medians[0]),
unpickled.cdf(medians[1]))
def test_expect(self):
# smoke test the expect method of the frozen distribution
# only take a gamma w/loc and scale and poisson with loc specified
def func(x):
return x
gm = stats.gamma(a=2, loc=3, scale=4)
gm_val = gm.expect(func, lb=1, ub=2, conditional=True)
gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4,
lb=1, ub=2, conditional=True)
assert_allclose(gm_val, gamma_val)
p = stats.poisson(3, loc=4)
p_val = p.expect(func)
poisson_val = stats.poisson.expect(func, args=(3,), loc=4)
assert_allclose(p_val, poisson_val)
class TestExpect(TestCase):
# Test for expect method.
#
# Uses normal distribution and beta distribution for finite bounds, and
# hypergeom for discrete distribution with finite support
def test_norm(self):
v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2)
assert_almost_equal(v, 4, decimal=14)
m = stats.norm.expect(lambda x: (x), loc=5, scale=2)
assert_almost_equal(m, 5, decimal=14)
lb = stats.norm.ppf(0.05, loc=5, scale=2)
ub = stats.norm.ppf(0.95, loc=5, scale=2)
prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub)
assert_almost_equal(prob90, 0.9, decimal=14)
prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub,
conditional=True)
assert_almost_equal(prob90c, 1., decimal=14)
def test_beta(self):
# case with finite support interval
v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5),
loc=5, scale=2)
assert_almost_equal(v, 1./18., decimal=13)
m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
assert_almost_equal(m, 19/3., decimal=13)
ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
scale=2., lb=lb, ub=ub, conditional=False)
assert_almost_equal(prob90, 0.9, decimal=13)
prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
scale=2, lb=lb, ub=ub, conditional=True)
assert_almost_equal(prob90c, 1., decimal=13)
def test_hypergeom(self):
# test case with finite bounds
# without specifying bounds
m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.)
m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.)
assert_almost_equal(m, m_true, decimal=13)
v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8),
loc=5.)
assert_almost_equal(v, v_true, decimal=14)
# with bounds, bounds equal to shifted support
v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2,
args=(20, 10, 8),
loc=5., lb=5, ub=13)
assert_almost_equal(v_bounds, v_true, decimal=14)
# drop boundary points
prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum()
prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
loc=5., lb=6, ub=12)
assert_almost_equal(prob_bounds, prob_true, decimal=13)
# conditional
prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5.,
lb=6, ub=12, conditional=True)
assert_almost_equal(prob_bc, 1, decimal=14)
# check simple integral
prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8),
lb=0, ub=8)
assert_almost_equal(prob_b, 1, decimal=13)
def test_poisson(self):
# poisson, use lower bound only
prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3,
conditional=False)
prob_b_true = 1-stats.poisson.cdf(2, 2)
assert_almost_equal(prob_bounds, prob_b_true, decimal=14)
prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2,
conditional=True)
assert_almost_equal(prob_lb, 1, decimal=14)
def test_genhalflogistic(self):
# genhalflogistic, changes upper bound of support in _argcheck
# regression test for gh-2622
halflog = stats.genhalflogistic
# check consistency when calling expect twice with the same input
res1 = halflog.expect(args=(1.5,))
halflog.expect(args=(0.5,))
res2 = halflog.expect(args=(1.5,))
assert_almost_equal(res1, res2, decimal=14)
def test_rice_overflow(self):
        # rice.pdf(999, 0.74) was inf since special.i0 silently overflows
# check that using i0e fixes it
assert_(np.isfinite(stats.rice.pdf(999, 0.74)))
assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,))))
assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,))))
def test_logser(self):
# test a discrete distribution with infinite support and loc
p, loc = 0.3, 3
res_0 = stats.logser.expect(lambda k: k, args=(p,))
# check against the correct answer (sum of a geom series)
assert_allclose(res_0,
p / (p - 1.) / np.log(1. - p), atol=1e-15)
# now check it with `loc`
res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc)
assert_allclose(res_l, res_0 + loc, atol=1e-15)
def test_skellam(self):
        # Use a discrete distribution w/ bi-infinite support. Compute the
        # first two moments and compare to known values (cf skellam.stats)
p1, p2 = 18, 22
m1 = stats.skellam.expect(lambda x: x, args=(p1, p2))
m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2))
assert_allclose(m1, p1 - p2, atol=1e-12)
assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12)
def test_randint(self):
# Use a discrete distribution w/ parameter-dependent support, which
# is larger than the default chunksize
lo, hi = 0, 113
res = stats.randint.expect(lambda x: x, (lo, hi))
assert_allclose(res,
sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15)
def test_zipf(self):
# Test that there is no infinite loop even if the sum diverges
assert_warns(RuntimeWarning, stats.zipf.expect,
lambda x: x**2, (2,))
def test_discrete_kwds(self):
# check that discrete expect accepts keywords to control the summation
n0 = stats.poisson.expect(lambda x: 1, args=(2,))
n1 = stats.poisson.expect(lambda x: 1, args=(2,),
maxcount=1001, chunksize=32, tolerance=1e-8)
assert_almost_equal(n0, n1, decimal=14)
def test_moment(self):
# test the .moment() method: compute a higher moment and compare to
# a known value
def poiss_moment5(mu):
return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu
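        # The coefficients above are Stirling numbers of the second
        # kind: E[X**n] = sum_k S(n, k)*mu**k for Poisson(mu), and
        # S(5, k) = 1, 15, 25, 10, 1 for k = 1..5.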
for mu in [5, 7]:
m5 = stats.poisson.moment(5, mu)
assert_allclose(m5, poiss_moment5(mu), rtol=1e-10)
class TestNct(TestCase):
def test_nc_parameter(self):
# Parameter values c<=0 were not enabled (gh-2402).
        # For negative values of c and for c=0, the results of rv.cdf(0)
        # below were nan
rv = stats.nct(5, 0)
assert_equal(rv.cdf(0), 0.5)
rv = stats.nct(5, -1)
assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10)
def test_broadcasting(self):
res = stats.nct.pdf(5, np.arange(4, 7)[:, None],
np.linspace(0.1, 1, 4))
expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997],
[0.00217142, 0.00395366, 0.00683888, 0.01126276],
[0.00153078, 0.00291093, 0.00525206, 0.00900815]])
assert_allclose(res, expected, rtol=1e-5)
def test_variance_gh_issue_2401(self):
# Computation of the variance of a non-central t-distribution resulted
# in a TypeError: ufunc 'isinf' not supported for the input types,
# and the inputs could not be safely coerced to any supported types
# according to the casting rule 'safe'
rv = stats.nct(4, 0)
assert_equal(rv.var(), 2.0)
def test_nct_inf_moments(self):
# n-th moment of nct only exists for df > n
m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk')
assert_(np.isfinite(m))
assert_equal([v, s, k], [np.inf, np.nan, np.nan])
m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk')
assert_(np.isfinite([m, v, s]).all())
assert_equal(k, np.nan)
class TestRice(TestCase):
def test_rice_zero_b(self):
# rice distribution should work with b=0, cf gh-2164
x = [0.2, 1., 5.]
assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all())
assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all())
q = [0.1, 0.1, 0.5, 0.9]
assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all())
mvsk = stats.rice.stats(0, moments='mvsk')
assert_(np.isfinite(mvsk).all())
# furthermore, pdf is continuous as b\to 0
# rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2)
# see e.g. Abramowitz & Stegun 9.6.7 & 9.6.10
b = 1e-8
assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b),
atol=b, rtol=0)
def test_rice_rvs(self):
rvs = stats.rice.rvs
assert_equal(rvs(b=3.).size, 1)
assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5))
class TestErlang(TestCase):
def test_erlang_runtimewarning(self):
# erlang should generate a RuntimeWarning if a non-integer
# shape parameter is used.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
# The non-integer shape parameter 1.3 should trigger a
# RuntimeWarning
assert_raises(RuntimeWarning,
stats.erlang.rvs, 1.3, loc=0, scale=1, size=4)
# Calling the fit method with `f0` set to an integer should
# *not* trigger a RuntimeWarning. It should return the same
# values as gamma.fit(...).
data = [0.5, 1.0, 2.0, 4.0]
result_erlang = stats.erlang.fit(data, f0=1)
result_gamma = stats.gamma.fit(data, f0=1)
assert_allclose(result_erlang, result_gamma, rtol=1e-3)
class TestRayleigh(TestCase):
# gh-6227
def test_logpdf(self):
y = stats.rayleigh.logpdf(50)
assert_allclose(y, -1246.0879769945718)
def test_logsf(self):
y = stats.rayleigh.logsf(50)
assert_allclose(y, -1250)
class TestExponWeib(TestCase):
def test_pdf_logpdf(self):
# Regression test for gh-3508.
x = 0.1
a = 1.0
c = 100.0
p = stats.exponweib.pdf(x, a, c)
logp = stats.exponweib.logpdf(x, a, c)
# Expected values were computed with mpmath.
assert_allclose([p, logp],
[1.0000000000000054e-97, -223.35075402042244])
def test_a_is_1(self):
# For issue gh-3508.
# Check that when a=1, the pdf and logpdf methods of exponweib are the
# same as those of weibull_min.
x = np.logspace(-4, -1, 4)
a = 1
c = 100
p = stats.exponweib.pdf(x, a, c)
expected = stats.weibull_min.pdf(x, c)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.weibull_min.logpdf(x, c)
assert_allclose(logp, expected)
def test_a_is_1_c_is_1(self):
# When a = 1 and c = 1, the distribution is exponential.
x = np.logspace(-8, 1, 10)
a = 1
c = 1
p = stats.exponweib.pdf(x, a, c)
expected = stats.expon.pdf(x)
assert_allclose(p, expected)
logp = stats.exponweib.logpdf(x, a, c)
expected = stats.expon.logpdf(x)
assert_allclose(logp, expected)
class TestWeibull(TestCase):
def test_logpdf(self):
# gh-6217
y = stats.weibull_min.logpdf(0, 1)
assert_equal(y, 0)
def test_with_maxima_distrib(self):
# Tests for weibull_min and weibull_max.
# The expected values were computed using the symbolic algebra
# program 'maxima' with the package 'distrib', which has
# 'pdf_weibull' and 'cdf_weibull'. The mapping between the
# scipy and maxima functions is as follows:
# -----------------------------------------------------------------
# scipy maxima
# --------------------------------- ------------------------------
# weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b)
# weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b))
# weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b)
# weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b))
# weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b)
# weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b))
#
# weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b)
# weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b))
# weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b)
# weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b))
# weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b)
# weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b))
# -----------------------------------------------------------------
x = 1.5
a = 2.0
b = 3.0
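# As a worked instance of the mapping above:
# pdf_weibull(1.5, 2, 3) = (2/3)*(1.5/3)*exp(-(1.5/3)**2)
# = exp(-0.25)/3, the first value checked below.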
# weibull_min
p = stats.weibull_min.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_min.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_min.cdf(x, a, scale=b)
assert_allclose(c, -special.expm1(-0.25))
lc = stats.weibull_min.logcdf(x, a, scale=b)
assert_allclose(lc, np.log(-special.expm1(-0.25)))
s = stats.weibull_min.sf(x, a, scale=b)
assert_allclose(s, np.exp(-0.25))
ls = stats.weibull_min.logsf(x, a, scale=b)
assert_allclose(ls, -0.25)
# Also test using a large value x, for which computing the survival
# function using the CDF would result in 0.
s = stats.weibull_min.sf(30, 2, scale=3)
assert_allclose(s, np.exp(-100))
ls = stats.weibull_min.logsf(30, 2, scale=3)
assert_allclose(ls, -100)
# weibull_max
x = -1.5
p = stats.weibull_max.pdf(x, a, scale=b)
assert_allclose(p, np.exp(-0.25)/3)
lp = stats.weibull_max.logpdf(x, a, scale=b)
assert_allclose(lp, -0.25 - np.log(3))
c = stats.weibull_max.cdf(x, a, scale=b)
assert_allclose(c, np.exp(-0.25))
lc = stats.weibull_max.logcdf(x, a, scale=b)
assert_allclose(lc, -0.25)
s = stats.weibull_max.sf(x, a, scale=b)
assert_allclose(s, -special.expm1(-0.25))
ls = stats.weibull_max.logsf(x, a, scale=b)
assert_allclose(ls, np.log(-special.expm1(-0.25)))
# Also test using a value of x close to 0, for which computing the
# survival function using the CDF would result in 0.
s = stats.weibull_max.sf(-1e-9, 2, scale=3)
assert_allclose(s, -special.expm1(-1/9000000000000000000))
ls = stats.weibull_max.logsf(-1e-9, 2, scale=3)
assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000)))
class TestRdist(TestCase):
@dec.slow
def test_rdist_cdf_gh1285(self):
# check workaround in rdist._cdf for issue gh-1285.
distfn = stats.rdist
values = [0.001, 0.5, 0.999]
assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0),
values, decimal=5)
class TestTrapz(TestCase):
def test_reduces_to_triang(self):
modes = [0.3, 0.5]
for mode in modes:
x = [0, mode, 1]
assert_almost_equal(stats.trapz.pdf(x, mode, mode),
stats.triang.pdf(x, mode))
assert_almost_equal(stats.trapz.cdf(x, mode, mode),
stats.triang.cdf(x, mode))
def test_reduces_to_uniform(self):
x = np.linspace(0, 1, 10)
old_err = np.seterr(divide='ignore')
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
assert_almost_equal(stats.trapz.pdf(x, 0, 1),
stats.uniform.pdf(x))
assert_almost_equal(stats.trapz.cdf(x, 0, 1),
stats.uniform.cdf(x))
np.seterr(**old_err)
def test_cases(self):
old_err = np.seterr(divide='ignore')
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
# edge cases
assert_almost_equal(stats.trapz.pdf(0, 0, 0), 2)
assert_almost_equal(stats.trapz.pdf(1, 1, 1), 2)
assert_almost_equal(stats.trapz.pdf(0.5, 0, 0.8), 1.11111111111111111)
assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 1.0), 1.11111111111111111)
# straightforward case
assert_almost_equal(stats.trapz.pdf(0.1, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 0.8), 1.25)
assert_almost_equal(stats.trapz.pdf(0.9, 0.2, 0.8), 0.625)
assert_almost_equal(stats.trapz.cdf(0.1, 0.2, 0.8), 0.03125)
assert_almost_equal(stats.trapz.cdf(0.2, 0.2, 0.8), 0.125)
assert_almost_equal(stats.trapz.cdf(0.5, 0.2, 0.8), 0.5)
assert_almost_equal(stats.trapz.cdf(0.9, 0.2, 0.8), 0.96875)
assert_almost_equal(stats.trapz.cdf(1.0, 0.2, 0.8), 1.0)
np.seterr(**old_err)
def test_trapz_vect(self):
# test that array-valued shapes and arguments are handled
c = np.array([0.1, 0.2, 0.3])
d = np.array([0.5, 0.6])[:, None]
x = np.array([0.15, 0.25, 0.9])
v = stats.trapz.pdf(x, c, d)
cc, dd, xx = np.broadcast_arrays(c, d, x)
res = np.empty(xx.size, dtype=xx.dtype)
ind = np.arange(xx.size)
for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()):
res[i] = stats.trapz.pdf(x1, c1, d1)
assert_allclose(v, res.reshape(v.shape), atol=1e-15)
def test_540_567():
# test for nan returned in tickets 540, 567
assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846,
decimal=10, err_msg='test_540_567')
assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309,
scale=0.204423758009),
0.98353464004309321,
decimal=10, err_msg='test_540_567')
def test_regression_ticket_1316():
# The following was raising an exception, because _construct_default_doc()
# did not handle the default keyword extradoc=None. See ticket #1316.
g = stats._continuous_distns.gamma_gen(name='gamma')
def test_regression_ticket_1326():
# The pdf calculation was adjusted to avoid a nan from 0*log(0).
assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14)
def test_regression_tukey_lambda():
# Make sure that Tukey-Lambda distribution correctly handles
# non-positive lambdas.
x = np.linspace(-5.0, 5.0, 101)
olderr = np.seterr(divide='ignore')
try:
for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]:
p = stats.tukeylambda.pdf(x, lam)
assert_((p != 0.0).all())
assert_(not np.isnan(p).any())
lam = np.array([[-1.0], [0.0], [2.0]])
p = stats.tukeylambda.pdf(x, lam)
finally:
np.seterr(**olderr)
assert_(not np.isnan(p).any())
assert_((p[0] != 0.0).all())
assert_((p[1] != 0.0).all())
assert_((p[2] != 0.0).any())
assert_((p[2] == 0.0).any())
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_regression_ticket_1421():
assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__)
assert_('pmf(x,' in stats.poisson.__doc__)
def test_nan_arguments_gh_issue_1362():
with np.errstate(invalid='ignore'):
assert_(np.isnan(stats.t.logcdf(1, np.nan)))
assert_(np.isnan(stats.t.cdf(1, np.nan)))
assert_(np.isnan(stats.t.logsf(1, np.nan)))
assert_(np.isnan(stats.t.sf(1, np.nan)))
assert_(np.isnan(stats.t.pdf(1, np.nan)))
assert_(np.isnan(stats.t.logpdf(1, np.nan)))
assert_(np.isnan(stats.t.ppf(1, np.nan)))
assert_(np.isnan(stats.t.isf(1, np.nan)))
assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5)))
assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5)))
def test_frozen_fit_ticket_1536():
np.random.seed(5678)
true = np.array([0.25, 0., 0.5])
x = stats.lognorm.rvs(true[0], true[1], true[2], size=100)
olderr = np.seterr(divide='ignore')
try:
params = np.array(stats.lognorm.fit(x, floc=0.))
finally:
np.seterr(**olderr)
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0))
assert_almost_equal(params, true, decimal=2)
params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0))
assert_almost_equal(params, true, decimal=2)
np.random.seed(5678)
loc = 1
floc = 0.9
x = stats.norm.rvs(loc, 2., size=100)
params = np.array(stats.norm.fit(x, floc=floc))
expected = np.array([floc, np.sqrt(((x-floc)**2).mean())])
assert_almost_equal(params, expected, decimal=4)
def test_regression_ticket_1530():
# Check the starting value works for Cauchy distribution fit.
np.random.seed(654321)
rvs = stats.cauchy.rvs(size=100)
params = stats.cauchy.fit(rvs)
expected = (0.045, 1.142)
assert_almost_equal(params, expected, decimal=1)
def test_gh_pr_4806():
# Check starting values for Cauchy distribution fit.
np.random.seed(1234)
x = np.random.randn(42)
for offset in 10000.0, 1222333444.0:
loc, scale = stats.cauchy.fit(x + offset)
assert_allclose(loc, offset, atol=1.0)
assert_allclose(scale, 0.6, atol=1.0)
def test_tukeylambda_stats_ticket_1545():
# Some tests for the variance and kurtosis of the Tukey Lambda
# distribution. See test_tukeylambda_stats.py for more tests.
mv = stats.tukeylambda.stats(0, moments='mvsk')
# Known exact values:
expected = [0, np.pi**2/3, 0, 1.2]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(3.13, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 0.0269220858861465102, 0, -0.898062386219224104]
assert_almost_equal(mv, expected, decimal=10)
mv = stats.tukeylambda.stats(0.14, moments='mvsk')
# 'expected' computed with mpmath.
expected = [0, 2.11029702221450250, 0, -0.02708377353223019456]
assert_almost_equal(mv, expected, decimal=10)
def test_poisson_logpmf_ticket_1436():
assert_(np.isfinite(stats.poisson.logpmf(1500, 200)))
def test_powerlaw_stats():
"""Test the powerlaw stats function.
This unit test is also a regression test for ticket 1548.
The exact values are:
mean:
mu = a / (a + 1)
variance:
sigma**2 = a / ((a + 2) * (a + 1) ** 2)
skewness:
One formula (see http://en.wikipedia.org/wiki/Skewness) is
gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3
A short calculation shows that E[X**k] is a / (a + k), so gamma_1
can be implemented as
n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3
d = sqrt(a/((a+2)*(a+1)**2)) ** 3
gamma_1 = n/d
Either by simplifying, or by a direct calculation of mu_3 / sigma**3,
one gets the more concise formula:
gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a)
kurtosis: (See http://en.wikipedia.org/wiki/Kurtosis)
The excess kurtosis is
gamma_2 = mu_4 / sigma**4 - 3
A bit of calculus and algebra (sympy helps) shows that
mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4))
so
gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3
which can be rearranged to
gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4))
"""
cases = [(1.0, (0.5, 1./12, 0.0, -1.2)),
(2.0, (2./3, 2./36, -0.56568542494924734, -0.6))]
for a, exact_mvsk in cases:
mvsk = stats.powerlaw.stats(a, moments="mvsk")
assert_array_almost_equal(mvsk, exact_mvsk)
def test_powerlaw_edge():
# Regression test for gh-3986.
p = stats.powerlaw.logpdf(0, 1)
assert_equal(p, 0.0)
def test_exponpow_edge():
# Regression test for gh-3982.
p = stats.exponpow.logpdf(0, 1)
assert_equal(p, 0.0)
# Check pdf and logpdf at x = 0 for other values of b.
p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 1.0, 0.0])
p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5])
assert_equal(p, [np.inf, 0.0, -np.inf])
def test_gengamma_edge():
# Regression test for gh-3985.
p = stats.gengamma.pdf(0, 1, 1)
assert_equal(p, 1.0)
# Regression tests for gh-4724.
p = stats.gengamma._munp(-2, 200, 1.)
assert_almost_equal(p, 1./199/198)
p = stats.gengamma._munp(-2, 10, 1.)
assert_almost_equal(p, 1./9/8)
def test_ksone_fit_freeze():
# Regression test for ticket #1638.
d = np.array(
[-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649,
-0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294,
0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048,
0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724,
0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662,
0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391,
-0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725,
-0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199,
-0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746,
-0.06037974, 0.37670779, -0.21684405])
try:
olderr = np.seterr(invalid='ignore')
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', RuntimeWarning)
stats.ksone.fit(d)
finally:
np.seterr(**olderr)
def test_norm_logcdf():
# Test precision of the logcdf of the normal distribution.
# This precision was enhanced in ticket 1614.
x = -np.asarray(list(range(0, 120, 4)))
# Values from R
expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300,
-131.69539607, -203.91715537, -292.09872100, -396.25241451,
-516.38564863, -652.50322759, -804.60844201, -972.70364403,
-1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068,
-2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493,
-3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522,
-4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548,
-6277.63751711, -6733.67260303]
assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8)
# also test the complex-valued code path
assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8)
# test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf)
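# (complex-step differentiation: for an analytic f and a small step h,
# Im[f(x + 1j*h)]/h approximates f'(x) without subtractive cancellation)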
deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag
deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x))
assert_allclose(deriv, deriv_expected, atol=1e-10)
def test_levy_cdf_ppf():
# Test levy.cdf, including small arguments.
x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001])
# Expected values were calculated separately with mpmath.
# E.g.
# >>> mpmath.mp.dps = 100
# >>> x = mpmath.mp.mpf('0.01')
# >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x)))
expected = np.array([0.9747728793699604,
0.3173105078629141,
0.1572992070502851,
0.0015654022580025495,
1.523970604832105e-23,
1.795832784800726e-219])
y = stats.levy.cdf(x)
assert_allclose(y, expected, rtol=1e-10)
# ppf(expected) should get us back to x.
xx = stats.levy.ppf(expected)
assert_allclose(xx, x, rtol=1e-13)
def test_hypergeom_interval_1802():
# these two had endless loops
assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757),
(152.0, 197.0))
assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757),
(152.0, 197.0))
# this case was already working before the fix
assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757),
(153.0, 196.0))
# degenerate case .a == .b
assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8)
assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8)
def test_distribution_too_many_args():
# Check that a TypeError is raised when too many args are given to a method
# Regression test for ticket 1815.
x = np.linspace(0.1, 0.7, num=5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5)
assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5)
assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5)
# These should not give errors
stats.gamma.pdf(x, 2, 3) # loc=3
stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4
stats.gamma.stats(2., 3)
stats.gamma.stats(2., 3, 4)
stats.gamma.stats(2., 3, 4, 'mv')
stats.gamma.rvs(2., 3, 4, 5)
stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.)
# Also for a discrete distribution
stats.geom.pmf(x, 2, loc=3) # no error, loc=3
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4)
assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4)
# And for distributions with 0, 2 and 3 args respectively
assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0)
assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0)
assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5)
stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale
def test_ncx2_tails_ticket_955():
# Trac #955 -- check that the cdf computed by special functions
# matches the integrated pdf
a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02)
assert_allclose(a, b, rtol=1e-3, atol=0)
def test_ncx2_tails_pdf():
# ncx2.pdf does not return nans in extreme tails (example from gh-1577)
# NB: this is to check that nan_to_num is not needed in ncx2.pdf
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0)
logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2)
assert_(np.isneginf(logval).all())
def test_foldnorm_zero():
# Parameter value c=0 was not enabled, see gh-2399.
rv = stats.foldnorm(0, scale=1)
assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan
def test_stats_shapes_argcheck():
# stats method was failing for vector shapes if some of the values
# were outside of the allowed range, see gh-2678
mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a`
mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5)
mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# -1 is not a legal shape parameter
mv3 = stats.lognorm.stats([2, 2.4, -1])
mv2 = stats.lognorm.stats([2, 2.4])
mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2)
assert_equal(mv2_augmented, mv3)
# FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix.
# stats method with multiple shape parameters is not properly vectorized
# anyway, so some distributions may or may not fail.
# Test subclassing distributions w/ explicit shapes
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, a):
return 42
class _distr2_gen(stats.rv_continuous):
def _cdf(self, x, a):
return 42 * a + x
class _distr3_gen(stats.rv_continuous):
def _pdf(self, x, a, b):
return a + b
def _cdf(self, x, a):
# Different # of shape params from _pdf, to be able to check that
# inspection catches the inconsistency.
return 42 * a + x
class _distr6_gen(stats.rv_continuous):
# Two shape parameters (both _pdf and _cdf defined, with consistent shapes).
def _pdf(self, x, a, b):
return a*x + b
def _cdf(self, x, a, b):
return 42 * a + x
class TestSubclassingExplicitShapes(TestCase):
# Construct a distribution w/ explicit shapes parameter and test it.
def test_correct_shapes(self):
dummy_distr = _distr_gen(name='dummy', shapes='a')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_wrong_shapes_1(self):
dummy_distr = _distr_gen(name='dummy', shapes='A')
assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1))
def test_wrong_shapes_2(self):
dummy_distr = _distr_gen(name='dummy', shapes='a, b, c')
dct = dict(a=1, b=2, c=3)
assert_raises(TypeError, dummy_distr.pdf, 1, **dct)
def test_shapes_string(self):
# shapes must be a string
dct = dict(name='dummy', shapes=42)
assert_raises(TypeError, _distr_gen, **dct)
def test_shapes_identifiers_1(self):
# shapes must be a comma-separated list of valid python identifiers
dct = dict(name='dummy', shapes='(!)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_2(self):
dct = dict(name='dummy', shapes='4chan')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_3(self):
dct = dict(name='dummy', shapes='m(fti)')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_identifiers_nodefaults(self):
dct = dict(name='dummy', shapes='a=2')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_args(self):
dct = dict(name='dummy', shapes='*args')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_kwargs(self):
dct = dict(name='dummy', shapes='**kwargs')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_keywords(self):
# python keywords cannot be used for shape parameters
dct = dict(name='dummy', shapes='a, b, c, lambda')
assert_raises(SyntaxError, _distr_gen, **dct)
def test_shapes_signature(self):
# test explicit shapes which agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a')
assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2)
def test_shapes_signature_inconsistent(self):
# test explicit shapes which do not agree w/ the signature of _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a):
return stats.norm._pdf(x) * a
dist = _dist_gen(shapes='a, b')
assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2))
def test_star_args(self):
# test _pdf with only starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg
dist = _dist_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33)
assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33)
assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33))
def test_star_args_2(self):
# test _pdf with named & starargs
# NB: **kwargs of pdf will never reach _pdf
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, offset, *args):
extra_kwarg = args[0]
return stats.norm._pdf(x) * extra_kwarg + offset
dist = _dist_gen(shapes='offset, extra_kwarg')
assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33),
stats.norm.pdf(0.5)*33 + 111)
assert_equal(dist.pdf(0.5, 111, 33),
stats.norm.pdf(0.5)*33 + 111)
def test_extra_kwarg(self):
# **kwargs to _pdf are ignored.
# this is a limitation of the framework (_pdf(x, *goodargs))
class _distr_gen(stats.rv_continuous):
def _pdf(self, x, *args, **kwargs):
# _pdf should handle *args, **kwargs itself. Here "handling"
# is ignoring *args and looking for ``extra_kwarg`` and using
# that.
extra_kwarg = kwargs.pop('extra_kwarg', 1)
return stats.norm._pdf(x) * extra_kwarg
dist = _distr_gen(shapes='extra_kwarg')
assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1))
def test_shapes_empty_string(self):
# shapes='' is equivalent to shapes=None
class _dist_gen(stats.rv_continuous):
def _pdf(self, x):
return stats.norm.pdf(x)
dist = _dist_gen(shapes='')
assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5))
class TestSubclassingNoShapes(TestCase):
# Construct a distribution w/o explicit shapes parameter and test it.
def test_only__pdf(self):
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.pdf(1, a=1), 42)
def test_only__cdf(self):
# _pdf is determined from _cdf by taking numerical derivative
dummy_distr = _distr2_gen(name='dummy')
assert_almost_equal(dummy_distr.pdf(1, a=1), 1)
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_signature_inspection(self):
# check that _pdf signature inspection works correctly, and is used in
# the class docstring
dummy_distr = _distr_gen(name='dummy')
assert_equal(dummy_distr.numargs, 1)
assert_equal(dummy_distr.shapes, 'a')
res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_signature_inspection_2args(self):
# same for 2 shape params and both _pdf and _cdf defined
dummy_distr = _distr6_gen(name='dummy')
assert_equal(dummy_distr.numargs, 2)
assert_equal(dummy_distr.shapes, 'a, b')
res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)',
dummy_distr.__doc__)
assert_(len(res) == 1)
def test_signature_inspection_2args_incorrect_shapes(self):
# both _pdf and _cdf defined, but shapes are inconsistent: raises
try:
_distr3_gen(name='dummy')
except TypeError:
pass
else:
raise AssertionError('TypeError not raised.')
def test_defaults_raise(self):
# default arguments should raise
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a=42):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_starargs_raise(self):
# without explicit shapes, *args are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, *args):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
def test_kwargs_raise(self):
# without explicit shapes, **kwargs are not allowed
class _dist_gen(stats.rv_continuous):
def _pdf(self, x, a, **kwargs):
return 42
assert_raises(TypeError, _dist_gen, **dict(name='dummy'))
@dec.skipif(DOCSTRINGS_STRIPPED)
def test_docstrings():
badones = [r',\s*,', r'\(\s*,', r'^\s*:']
for distname in stats.__all__:
dist = getattr(stats, distname)
if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)):
for regex in badones:
assert_(re.search(regex, dist.__doc__) is None)
def test_infinite_input():
assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0)
assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1)
def test_lomax_accuracy():
# regression test for gh-4033
p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_gompertz_accuracy():
# Regression test for gh-4031
p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_truncexpon_accuracy():
# regression test for gh-4035
p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1)
assert_allclose(p, 1e-100)
def test_rayleigh_accuracy():
# regression test for gh-4034
p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1)
assert_almost_equal(p, 9.0, decimal=15)
def test_genextreme_give_no_warnings():
"""regression test for gh-6219"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
p = stats.genextreme.cdf(.5, 0)
p = stats.genextreme.pdf(.5, 0)
p = stats.genextreme.ppf(.5, 0)
p = stats.genextreme.logpdf(-np.inf, 0.0)
number_of_warnings_thrown = len(w)
assert_equal(number_of_warnings_thrown, 0)
def test_genextreme_entropy():
# regression test for gh-5181
euler_gamma = 0.5772156649015329
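# The checks below all instantiate the closed form (stated here as a
# summary of the pattern, not quoted from the docs):
#   h(c) = euler_gamma*(1 - c) + 1 + log(scale)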
h = stats.genextreme.entropy(-1.0)
assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(0)
assert_allclose(h, euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(1.0)
assert_equal(h, 1)
h = stats.genextreme.entropy(-2.0, scale=10)
assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14)
h = stats.genextreme.entropy(10)
assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14)
h = stats.genextreme.entropy(-10)
assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14)
def test_genextreme_sf_isf():
# Expected values were computed using mpmath:
#
# import mpmath
#
# def mp_genextreme_sf(x, xi, mu=0, sigma=1):
# # Formula from wikipedia, which has a sign convention for xi that
# # is the opposite of scipy's shape parameter.
# if xi != 0:
# t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi)
# else:
# t = mpmath.exp(-(x - mu)/sigma)
# return 1 - mpmath.exp(-t)
#
# >>> mpmath.mp.dps = 1000
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125"))
# >>> float(s)
# 1.6777205262585625e-57
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125"))
# >>> float(s)
# 1.52587890625e-21
# >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0"))
# >>> float(s)
# 0.00034218086528426593
x = 1e8
s = stats.genextreme.sf(x, -0.125)
assert_allclose(s, 1.6777205262585625e-57)
x2 = stats.genextreme.isf(s, -0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0.125)
assert_allclose(s, 1.52587890625e-21)
x2 = stats.genextreme.isf(s, 0.125)
assert_allclose(x2, x)
x = 7.98
s = stats.genextreme.sf(x, 0)
assert_allclose(s, 0.00034218086528426593)
x2 = stats.genextreme.isf(s, 0)
assert_allclose(x2, x)
def test_burr12_ppf_small_arg():
prob = 1e-16
quantile = stats.burr12.ppf(prob, 2, 3)
# The expected quantile was computed using mpmath:
# >>> import mpmath
# >>> prob = mpmath.mpf('1e-16')
# >>> c = mpmath.mpf(2)
# >>> d = mpmath.mpf(3)
# >>> float(((1-prob)**(-1/d) - 1)**(1/c))
# 5.7735026918962575e-09
assert_allclose(quantile, 5.7735026918962575e-09)
def test_argus_function():
# There is no usable reference implementation.
# (The RooFit implementation returns unreasonable results which are not
# normalized correctly.)
# Instead we run some checks that the distribution behaves as expected
# for different shapes and scales.
for i in range(1, 10):
for j in range(1, 10):
assert_equal(stats.argus.pdf(i + 0.001, chi=j, scale=i), 0.0)
assert_(stats.argus.pdf(i - 0.001, chi=j, scale=i) > 0.0)
assert_equal(stats.argus.pdf(-0.001, chi=j, scale=i), 0.0)
assert_(stats.argus.pdf(+0.001, chi=j, scale=i) > 0.0)
for i in range(1, 10):
assert_equal(stats.argus.cdf(1.0, chi=i), 1.0)
assert_equal(stats.argus.cdf(1.0, chi=i), 1.0 - stats.argus.sf(1.0, chi=i))
class TestHistogram(TestCase):
def setUp(self):
# We have 8 bins
# [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9)
# But actually np.histogram will put the last 9 also in the [8,9) bin!
# Therefore there is a slight difference below for the last bin, from
# what you might have expected.
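# (There are 25 samples in total and the bins have width 1, with counts
# 1, 2, 3, 4, 5, 4, 3, 3, so the pdf is count/25 on each bin.)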
histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5,
6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
self.template = stats.rv_histogram(histogram)
data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
norm_histogram = np.histogram(data, bins=50)
self.norm_template = stats.rv_histogram(norm_histogram)
def test_pdf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0,
2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0,
4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0,
4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0,
3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0])
assert_allclose(self.template.pdf(values), pdf_values)
# Test explicitly the corner cases:
# As stated above, the pdf in the bin [8,9) is greater than
# one would naively expect because np.histogram put the 9
# into the [8,9) bin.
assert_almost_equal(self.template.pdf(8.0), 3.0/25.0)
assert_almost_equal(self.template.pdf(8.5), 3.0/25.0)
# 9 is outside our defined bins [8,9) hence the pdf is already 0
# for a continuous distribution this is fine, because a single value
# does not have a finite probability!
assert_almost_equal(self.template.pdf(9.0), 0.0/25.0)
assert_almost_equal(self.template.pdf(10.0), 0.0/25.0)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.pdf(x),
stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_cdf_ppf(self):
values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5,
5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5])
cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0,
1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0,
6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0,
15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0,
22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0])
assert_allclose(self.template.cdf(values), cdf_values)
# First three and last two values in cdf_values are not unique
assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1])
# Test that cdf and ppf are inverse functions
x = np.linspace(1.0, 9.0, 100)
assert_allclose(self.template.ppf(self.template.cdf(x)), x)
x = np.linspace(0.0, 1.0, 100)
assert_allclose(self.template.cdf(self.template.ppf(x)), x)
x = np.linspace(-2, 2, 10)
assert_allclose(self.norm_template.cdf(x),
stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)
def test_rvs(self):
N = 10000
sample = self.template.rvs(size=N, random_state=123)
assert_equal(np.sum(sample < 1.0), 0.0)
assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2)
assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1)
assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05)
assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05)
assert_equal(np.sum(sample > 9.0), 0.0)
def test_munp(self):
for n in range(4):
assert_allclose(self.norm_template._munp(n),
stats.norm._munp(n, 1.0, 2.5), rtol=0.05)
def test_entropy(self):
assert_allclose(self.norm_template.entropy(),
stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/_security_rules_operations.py | 1 | 22399 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations(object):
"""SecurityRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
"""Get the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_11_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "_models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> "_models.SecurityRule"
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "_models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SecurityRule"]
"""Creates or updates a security rule in the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:param security_rule_parameters: Parameters supplied to the create or update network security
rule operation.
:type security_rule_parameters: ~azure.mgmt.network.v2017_11_01.models.SecurityRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_11_01.models.SecurityRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
security_rule_parameters=security_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SecurityRuleListResult"]
"""Gets all security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_11_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'} # type: ignore
| mit |
atsaki/ansible-modules-extras | cloud/cloudstack/cs_volume.py | 1 | 13909 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Jefferson Girão <jefferson@girao.net>
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_volume
short_description: Manages volumes on Apache CloudStack based clouds.
description:
- Create, destroy, attach, detach volumes.
version_added: "2.1"
author:
- "Jefferson Girão (@jeffersongirao)"
- "René Moser (@resmo)"
options:
name:
description:
- Name of the volume.
- C(name) can only contain ASCII letters.
required: true
account:
description:
- Account the volume is related to.
required: false
default: null
custom_id:
description:
- Custom id to the resource.
- Allowed to Root Admins only.
required: false
default: null
disk_offering:
description:
- Name of the disk offering to be used.
- One of C(disk_offering) or C(snapshot) is required if the volume is not already C(state=present).
required: false
default: null
display_volume:
description:
- Whether to display the volume to the end user or not.
- Allowed to Root Admins only.
required: false
default: true
domain:
description:
- Name of the domain the volume is to be deployed in.
required: false
default: null
max_iops:
description:
- Max iops
required: false
default: null
min_iops:
description:
- Min iops
required: false
default: null
project:
description:
- Name of the project the volume is to be deployed in.
required: false
default: null
size:
description:
- Size of disk in GB
required: false
default: null
snapshot:
description:
- The snapshot name for the disk volume.
- One of C(disk_offering) or C(snapshot) is required if the volume is not already C(state=present).
required: false
default: null
force:
description:
- Force removal of the volume even if it is attached to a VM.
- Considered on C(state=absent) only.
required: false
default: false
vm:
description:
- Name of the virtual machine to attach the volume to.
required: false
default: null
zone:
description:
- Name of the zone in which the volume should be deployed.
- If not set, default zone is used.
required: false
default: null
state:
description:
- State of the volume.
required: false
default: 'present'
choices: [ 'present', 'absent', 'attached', 'detached' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create volume within project, zone with specified storage options
- local_action:
module: cs_volume
name: web-vm-1-volume
project: Integration
zone: ch-zrh-ix-01
disk_offering: PerfPlus Storage
size: 20
# Create/attach volume to instance
- local_action:
module: cs_volume
name: web-vm-1-volume
disk_offering: PerfPlus Storage
size: 20
vm: web-vm-1
state: attached
# Detach volume
- local_action:
module: cs_volume
name: web-vm-1-volume
state: detached
# Remove volume
- local_action:
module: cs_volume
name: web-vm-1-volume
state: absent
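# Create volume from an existing snapshot (a hypothetical example, not in the
# original docs; the snapshot name is made up)
- local_action:
    module: cs_volume
    name: web-vm-1-volume-restored
    snapshot: web-vm-1-volume-snap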
'''
RETURN = '''
id:
description: ID of the volume.
returned: success
type: string
sample:
name:
description: Name of the volume.
returned: success
type: string
sample: web-volume-01
display_name:
description: Display name of the volume.
returned: success
type: string
sample: web-volume-01
group:
description: Group the volume belongs to
returned: success
type: string
sample: web
domain:
description: Domain the volume belongs to
returned: success
type: string
sample: example domain
project:
description: Project the volume belongs to
returned: success
type: string
sample: Production
zone:
description: Name of zone the volume is in.
returned: success
type: string
sample: ch-gva-2
created:
description: Date the volume was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
attached:
description: Date the volume was attached.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
type:
description: Disk volume type.
returned: success
type: string
sample: DATADISK
size:
description: Size of disk volume.
returned: success
type: string
sample: 20
vm:
description: Name of the vm the volume is attached to (not returned when detached)
returned: success
type: string
sample: web-01
state:
description: State of the volume
returned: success
type: string
sample: Attached
device_id:
description: Id of the device on user vm the volume is attached to (not returned when detached)
returned: success
type: string
sample: 1
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackVolume(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackVolume, self).__init__(module)
self.returns = {
'group': 'group',
'attached': 'attached',
'vmname': 'vm',
'deviceid': 'device_id',
'type': 'type',
'size': 'size',
}
self.volume = None
#TODO implement in cloudstack utils
def get_disk_offering(self, key=None):
disk_offering = self.module.params.get('disk_offering')
if not disk_offering:
return None
# Do not add domain filter for disk offering listing.
disk_offerings = self.cs.listDiskOfferings()
if disk_offerings:
for d in disk_offerings['diskoffering']:
if disk_offering in [d['displaytext'], d['name'], d['id']]:
return self._get_by_key(key, d)
self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering)
def get_volume(self):
if not self.volume:
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['displayvolume'] = self.module.params.get('display_volume')
args['type'] = 'DATADISK'
volumes = self.cs.listVolumes(**args)
if volumes:
volume_name = self.module.params.get('name')
for v in volumes['volume']:
if volume_name.lower() == v['name'].lower():
self.volume = v
break
return self.volume
def get_snapshot(self, key=None):
snapshot = self.module.params.get('snapshot')
if not snapshot:
return None
args = {}
args['name'] = snapshot
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
args['projectid'] = self.get_project('id')
snapshots = self.cs.listSnapshots(**args)
if snapshots:
return self._get_by_key(key, snapshots['snapshot'][0])
self.module.fail_json(msg="Snapshot with name %s not found" % snapshot)
def present_volume(self):
volume = self.get_volume()
if not volume:
disk_offering_id = self.get_disk_offering(key='id')
snapshot_id = self.get_snapshot(key='id')
if not disk_offering_id and not snapshot_id:
self.module.fail_json(msg="Required one of: disk_offering,snapshot")
self.result['changed'] = True
args = {}
args['name'] = self.module.params.get('name')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['diskofferingid'] = disk_offering_id
args['displayvolume'] = self.module.params.get('display_volume')
args['maxiops'] = self.module.params.get('max_iops')
args['miniops'] = self.module.params.get('min_iops')
args['projectid'] = self.get_project(key='id')
args['size'] = self.module.params.get('size')
args['snapshotid'] = snapshot_id
args['zoneid'] = self.get_zone(key='id')
if not self.module.check_mode:
res = self.cs.createVolume(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
volume = self.poll_job(res, 'volume')
return volume
def attached_volume(self):
volume = self.present_volume()
if volume.get('virtualmachineid') != self.get_vm(key='id'):
self.result['changed'] = True
if not self.module.check_mode:
volume = self.detached_volume()
if 'attached' not in volume:
self.result['changed'] = True
args = {}
args['id'] = volume['id']
args['virtualmachineid'] = self.get_vm(key='id')
args['deviceid'] = self.module.params.get('device_id')
if not self.module.check_mode:
res = self.cs.attachVolume(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
volume = self.poll_job(res, 'volume')
return volume
def detached_volume(self):
volume = self.present_volume()
if volume:
if 'attached' not in volume:
return volume
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.detachVolume(id=volume['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
volume = self.poll_job(res, 'volume')
return volume
def absent_volume(self):
volume = self.get_volume()
if volume:
if 'attached' in volume:
if self.module.params.get('force'):
self.detached_volume()
else:
self.module.fail_json(msg="Volume '%s' is attached, use force=true for detaching and removing the volume." % volume.get('name'))
self.result['changed'] = True
if not self.module.check_mode:
volume = self.detached_volume()
res = self.cs.deleteVolume(id=volume['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
res = self.poll_job(res, 'volume')
return volume
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
disk_offering = dict(default=None),
display_volume = dict(choices=BOOLEANS, default=True),
max_iops = dict(type='int', default=None),
min_iops = dict(type='int', default=None),
size = dict(type='int', default=None),
snapshot = dict(default=None),
vm = dict(default=None),
device_id = dict(type='int', default=None),
custom_id = dict(default=None),
force = dict(choices=BOOLEANS, default=False),
state = dict(choices=['present', 'absent', 'attached', 'detached'], default='present'),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
mutually_exclusive = (
['snapshot', 'disk_offering'],
),
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_vol = AnsibleCloudStackVolume(module)
state = module.params.get('state')
if state in ['absent']:
volume = acs_vol.absent_volume()
elif state in ['attached']:
volume = acs_vol.attached_volume()
elif state in ['detached']:
volume = acs_vol.detached_volume()
else:
volume = acs_vol.present_volume()
result = acs_vol.get_result(volume)
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
andykimpe/chromium-test-npapi | tools/telemetry/third_party/pyserial/serial/tools/list_ports_linux.py | 28 | 5091 | #!/usr/bin/env python
# portable serial port access with python
#
# This is a module that gathers a list of serial ports including details on
# GNU/Linux systems
#
# (C) 2011-2013 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
import glob
import sys
import os
import re
try:
import subprocess
except ImportError:
def popen(argv):
try:
si, so = os.popen4(' '.join(argv))
return so.read().strip()
except:
raise IOError('lsusb failed')
else:
def popen(argv):
try:
return subprocess.check_output(argv, stderr=subprocess.STDOUT).strip()
except:
raise IOError('lsusb failed')
# The comports function is expected to return an iterable that yields tuples of
# 3 strings: port name, human readable description and a hardware ID.
#
# As no reliable method is known to obtain the last two strings in every case,
# they fall back to the port name where nothing better is available.
# try to detect the OS so that a device can be selected...
plat = sys.platform.lower()
def read_line(filename):
"""help function to read a single line from a file. returns none"""
try:
f = open(filename)
line = f.readline().strip()
f.close()
return line
except IOError:
return None
def re_group(regexp, text):
"""search for regexp in text, return 1st group on match"""
if sys.version < '3':
m = re.search(regexp, text)
else:
# text is bytes-like
m = re.search(regexp, text.decode('ascii', 'replace'))
if m: return m.group(1)
# try to extract descriptions from sysfs. this was done by experimenting,
# no guarantee that it works for all devices or in the future...
def usb_sysfs_hw_string(sysfs_path):
"""given a path to a usb device in sysfs, return a string describing it"""
bus, dev = os.path.basename(os.path.realpath(sysfs_path)).split('-')
snr = read_line(sysfs_path+'/serial')
if snr:
snr_txt = ' SNR=%s' % (snr,)
else:
snr_txt = ''
return 'USB VID:PID=%s:%s%s' % (
read_line(sysfs_path+'/idVendor'),
read_line(sysfs_path+'/idProduct'),
snr_txt
)
def usb_lsusb_string(sysfs_path):
base = os.path.basename(os.path.realpath(sysfs_path))
bus = base.split('-')[0]
try:
dev = int(read_line(os.path.join(sysfs_path, 'devnum')))
desc = popen(['lsusb', '-v', '-s', '%s:%s' % (bus, dev)])
# descriptions from device
iManufacturer = re_group('iManufacturer\s+\w+ (.+)', desc)
iProduct = re_group('iProduct\s+\w+ (.+)', desc)
iSerial = re_group('iSerial\s+\w+ (.+)', desc) or ''
# descriptions from kernel
idVendor = re_group('idVendor\s+0x\w+ (.+)', desc)
idProduct = re_group('idProduct\s+0x\w+ (.+)', desc)
# create descriptions. prefer text from device, fall back to the others
return '%s %s %s' % (iManufacturer or idVendor, iProduct or idProduct, iSerial)
except IOError:
return base
def describe(device):
"""\
Get a human readable description.
For USB-Serial devices try to run lsusb to get a human readable description.
For USB-CDC devices read the description from sysfs.
"""
base = os.path.basename(device)
# USB-Serial devices
sys_dev_path = '/sys/class/tty/%s/device/driver/%s' % (base, base)
if os.path.exists(sys_dev_path):
sys_usb = os.path.dirname(os.path.dirname(os.path.realpath(sys_dev_path)))
return usb_lsusb_string(sys_usb)
# USB-CDC devices
sys_dev_path = '/sys/class/tty/%s/device/interface' % (base,)
if os.path.exists(sys_dev_path):
return read_line(sys_dev_path)
return base
def hwinfo(device):
"""Try to get a HW identification using sysfs"""
base = os.path.basename(device)
if os.path.exists('/sys/class/tty/%s/device' % (base,)):
# PCI based devices
sys_id_path = '/sys/class/tty/%s/device/id' % (base,)
if os.path.exists(sys_id_path):
return read_line(sys_id_path)
# USB-Serial devices
sys_dev_path = '/sys/class/tty/%s/device/driver/%s' % (base, base)
if os.path.exists(sys_dev_path):
sys_usb = os.path.dirname(os.path.dirname(os.path.realpath(sys_dev_path)))
return usb_sysfs_hw_string(sys_usb)
# USB-CDC devices
if base.startswith('ttyACM'):
sys_dev_path = '/sys/class/tty/%s/device' % (base,)
if os.path.exists(sys_dev_path):
return usb_sysfs_hw_string(sys_dev_path + '/..')
return 'n/a' # XXX directly remove these from the list?
def comports():
devices = glob.glob('/dev/ttyS*') + glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*')
return [(d, describe(d), hwinfo(d)) for d in devices]
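# An illustration (added; values are made up) of the tuple shape comports()
# yields for a USB-serial adapter:
#   ('/dev/ttyUSB0',
#    'FTDI FT232R USB UART A1234BCD',
#    'USB VID:PID=0403:6001 SNR=A1234BCD')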
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# test
if __name__ == '__main__':
for port, desc, hwid in sorted(comports()):
print "%s: %s [%s]" % (port, desc, hwid)
| bsd-3-clause |
instantchow/home-assistant | tests/components/thermostat/test_heat_control.py | 1 | 5422 | """The tests for the heat control thermostat."""
import unittest
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_OFF,
TEMP_CELCIUS,
)
from homeassistant.components import thermostat
from tests.common import get_test_home_assistant
entity = 'thermostat.test'
ent_sensor = 'sensor.test'
ent_switch = 'switch.test'
min_temp = 3.0
max_temp = 65.0
target_temp = 42.0
class TestThermostatHeatControl(unittest.TestCase):
"""Test the Heat Control thermostat."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.config.temperature_unit = TEMP_CELCIUS
thermostat.setup(self.hass, {'thermostat': {
'platform': 'heat_control',
'name': 'test',
'heater': ent_switch,
'target_sensor': ent_sensor
}})
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_defaults_to_unknown(self):
"""Test the setting of defaults to unknown."""
self.assertEqual('unknown', self.hass.states.get(entity).state)
def test_default_setup_params(self):
"""Test the setup with default parameters."""
state = self.hass.states.get(entity)
self.assertEqual(7, state.attributes.get('min_temp'))
self.assertEqual(35, state.attributes.get('max_temp'))
self.assertEqual(None, state.attributes.get('temperature'))
def test_custom_setup_params(self):
"""Test the setup with custom parameters."""
thermostat.setup(self.hass, {'thermostat': {
'platform': 'heat_control',
'name': 'test',
'heater': ent_switch,
'target_sensor': ent_sensor,
'min_temp': min_temp,
'max_temp': max_temp,
'target_temp': target_temp
}})
state = self.hass.states.get(entity)
self.assertEqual(min_temp, state.attributes.get('min_temp'))
self.assertEqual(max_temp, state.attributes.get('max_temp'))
self.assertEqual(target_temp, state.attributes.get('temperature'))
self.assertEqual(str(target_temp), self.hass.states.get(entity).state)
def test_set_target_temp(self):
"""Test the setting of the target temperature."""
thermostat.set_temperature(self.hass, 30)
self.hass.pool.block_till_done()
self.assertEqual('30.0', self.hass.states.get(entity).state)
def test_set_target_temp_turns_on_heater(self):
"""Test if target temperature turn heater on."""
self._setup_switch(False)
self._setup_sensor(25)
self.hass.pool.block_till_done()
thermostat.set_temperature(self.hass, 30)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ent_switch, call.data['entity_id'])
def test_set_target_temp_turns_off_heater(self):
"""Test if target temperature turn heater off."""
self._setup_switch(True)
self._setup_sensor(30)
self.hass.pool.block_till_done()
thermostat.set_temperature(self.hass, 25)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ent_switch, call.data['entity_id'])
def test_set_temp_change_turns_on_heater(self):
"""Test if temperature change turn heater on."""
self._setup_switch(False)
thermostat.set_temperature(self.hass, 30)
self.hass.pool.block_till_done()
self._setup_sensor(25)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_ON, call.service)
self.assertEqual(ent_switch, call.data['entity_id'])
def test_temp_change_turns_off_heater(self):
"""Test if temperature change turn heater off."""
self._setup_switch(True)
thermostat.set_temperature(self.hass, 25)
self.hass.pool.block_till_done()
self._setup_sensor(30)
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
call = self.calls[0]
self.assertEqual('switch', call.domain)
self.assertEqual(SERVICE_TURN_OFF, call.service)
self.assertEqual(ent_switch, call.data['entity_id'])
def _setup_sensor(self, temp, unit=TEMP_CELCIUS):
"""Setup the test sensor."""
self.hass.states.set(ent_sensor, temp, {
ATTR_UNIT_OF_MEASUREMENT: unit
})
def _setup_switch(self, is_on):
"""Setup the test switch."""
self.hass.states.set(ent_switch, STATE_ON if is_on else STATE_OFF)
self.calls = []
def log_call(call):
"""Log service calls."""
self.calls.append(call)
self.hass.services.register('switch', SERVICE_TURN_ON, log_call)
self.hass.services.register('switch', SERVICE_TURN_OFF, log_call)
| mit |
Dapid/numpy | doc/example.py | 81 | 3581 | """This is the docstring for the example.py module. Module names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi') :
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
pass
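# A minimal sketch (added, not part of the original module): the same
# conventions applied to a very short function; `bar` and its parameter
# are made up for illustration.
def bar(var1):
    """Return `var1` unchanged.

    Parameters
    ----------
    var1 : int
        Description of `var1`.

    Returns
    -------
    int
        The unmodified input.
    """
    return var1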
| bsd-3-clause |
hroark13/android_kernel_lge_f6x | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
johnmgregoire/PythonCompositionPlots | myternarydemo_hsvcompdiff.py | 1 | 1031 | from myternaryutility import TernaryPlot
import matplotlib.cm as cm
import numpy
import pylab, copy
from colorsys import rgb_to_hsv
from colorsys import hsv_to_rgb
pylab.figure(figsize=(6, 3))
ax=pylab.gca()
#stp = TernaryPlot(ax, ellabels=['Au', 'Cu', 'Si'])
stp = TernaryPlot(ax, ellabels=['A', 'B', 'C'])
stp.grid(nintervals=10, printticklabels=[4])
stp.label(fontsize=12)
comps=numpy.random.rand(50, 3)
comps/=comps.sum(axis=1)[:, numpy.newaxis]
#compdist=(numpy.random.rand(len(comps), 3)-0.5)/5
comps2=copy.copy(comps)
comps2[:, 2]+=.5
comps2/=comps2.sum(axis=1)[:, numpy.newaxis]
#compsdiff=comps2-comps
#
#terncoord=numpy.float64(comps)
#terncoord2=numpy.float64(comps2)
#
#
#
#sat = ((compsdiff**2).sum(axis=1)/2.)**.5
#
#huelist=[0. if cd.sum()==0. else rgb_to_hsv(*(cd/cd.sum()))[0] for cd in numpy.abs(compsdiff)]
#
#sat_norm=sat/max(sat)
#
#rgblist=[hsv_to_rgb(h, s, 1) for h, s in zip(huelist, sat_norm)]
#rgb_arr=stp.complex_to_rgb(ang, sat_norm)
stp.hsdiffplot(comps, comps2)
#
#
pylab.show()
print 'done'
| bsd-3-clause |
appapantula/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, 0.3 * N),
np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, 0.3 * N),
np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
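#----------------------------------------------------------------------
# A minimal sketch (added, not part of the original example): selecting the
# bandwidth by cross-validation. GridSearchCV lived in sklearn.grid_search in
# releases contemporary with this example (sklearn.model_selection today).
from sklearn.grid_search import GridSearchCV

grid = GridSearchCV(KernelDensity(kernel='gaussian'),
                    {'bandwidth': np.logspace(-1, 1, 20)})
grid.fit(X)  # X is the 100-sample 1D dataset defined above
print(grid.best_params_)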
| bsd-3-clause |
caveman-dick/ansible | lib/ansible/modules/cloud/amazon/cloudtrail.py | 22 | 23409 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudtrail
short_description: manage CloudTrail create, delete, update
description:
- Creates, deletes, or updates CloudTrail configuration. Ensures logging is also enabled.
version_added: "2.0"
author:
- "Ansible Core Team"
- "Ted Timmons"
- "Daniel Shepherd (@shepdelacreme)"
requirements:
- boto3
- botocore
options:
state:
description:
- Add or remove CloudTrail configuration.
- The states C(enabled) and C(disabled) are preserved for backwards compatibility.
- C(enabled) is equivalent to C(present) and C(disabled) to C(absent).
required: true
choices: ['present', 'absent', 'enabled', 'disabled']
name:
description:
- Name for the CloudTrail.
- Names are unique per-region unless the CloudTrail is a multi-region trail, in which case it is unique per-account.
required: true
enable_logging:
description:
- Start or stop the CloudTrail logging. If stopped the trail will be paused and will not record events or deliver log files.
default: true
version_added: "2.4"
s3_bucket_name:
description:
- An existing S3 bucket where CloudTrail will deliver log files.
- This bucket should exist and have the proper policy.
- See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
- Required when C(state=present)
version_added: "2.4"
s3_key_prefix:
description:
- S3 Key prefix for delivered log files. A trailing slash is not necessary and will be removed.
is_multi_region_trail:
description:
- Specify whether the trail belongs only to one region or exists in all regions.
default: false
version_added: "2.4"
enable_log_file_validation:
description:
- Specifies whether log file integrity validation is enabled.
- CloudTrail will create a hash for every log file delivered and produce a signed digest file that can be used to ensure log files have not been tampered with.
default: false
version_added: "2.4"
include_global_events:
description:
- Record API calls from global services such as IAM and STS.
default: true
sns_topic_name:
description:
- SNS Topic name to send notifications to when a log file is delivered
version_added: "2.4"
cloudwatch_logs_role_arn:
description:
- Specifies a full ARN for an IAM role that assigns the proper permissions for CloudTrail to create and write to the log group listed below.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html)
- "Example arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
- Required when C(cloudwatch_logs_log_group_arn) is given.
version_added: "2.4"
cloudwatch_logs_log_group_arn:
description:
- A full ARN specifying a valid CloudWatch log group to which CloudTrail logs will be delivered. The log group should already exist.
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/send-cloudtrail-events-to-cloudwatch-logs.html)
- "Example arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*"
- Required when C(cloudwatch_logs_role_arn) is given.
version_added: "2.4"
kms_key_id:
description:
- Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. This also has the effect of enabling log file encryption.
- The value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully specified ARN to a key, or a globally unique identifier.
- Examples
- alias/MyAliasName
- "arn:aws:kms:us-east-1:123456789012:alias/MyAliasName"
- "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
- 12345678-1234-1234-1234-123456789012
- See U(https://docs.aws.amazon.com/awscloudtrail/latest/userguide/encrypting-cloudtrail-log-files-with-aws-kms.html)
version_added: "2.4"
tags:
description:
- A hash/dictionary of tags to be applied to the CloudTrail resource.
- Remove completely or specify an empty dictionary to remove all tags.
default: {}
version_added: "2.4"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: create single region cloudtrail
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
s3_key_prefix: cloudtrail
region: us-east-1
- name: create multi-region trail with validation and tags
cloudtrail:
state: present
name: default
s3_bucket_name: mylogbucket
region: us-east-1
is_multi_region_trail: true
enable_log_file_validation: true
tags:
environment: dev
Name: default
- name: pause logging the trail we just created
cloudtrail:
state: present
name: default
enable_logging: false
s3_bucket_name: mylogbucket
region: us-east-1
is_multi_region_trail: true
enable_log_file_validation: true
tags:
environment: dev
Name: default
- name: delete a trail
cloudtrail:
state: absent
name: default
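# A hypothetical example (not in the original docs) combining the KMS and
# CloudWatch Logs options documented above; the ARNs are the sample values
# from the option descriptions.
- name: create an encrypted trail that also delivers to CloudWatch Logs
  cloudtrail:
    state: present
    name: default
    s3_bucket_name: mylogbucket
    region: us-east-1
    kms_key_id: alias/MyAliasName
    cloudwatch_logs_role_arn: "arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role"
    cloudwatch_logs_log_group_arn: "arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*"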
'''
RETURN = '''
exists:
description: whether the resource exists
returned: always
type: bool
sample: true
trail:
description: CloudTrail resource details
returned: always
type: complex
sample: hash/dictionary of values
contains:
trail_arn:
description: Full ARN of the CloudTrail resource
returned: success
type: string
sample: arn:aws:cloudtrail:us-east-1:123456789012:trail/default
name:
description: Name of the CloudTrail resource
returned: success
type: string
sample: default
is_logging:
description: Whether logging is turned on or paused for the Trail
returned: success
type: bool
sample: True
s3_bucket_name:
description: S3 bucket name where log files are delivered
returned: success
type: string
sample: myBucket
s3_key_prefix:
description: Key prefix in bucket where log files are delivered (if any)
returned: success when present
type: string
sample: myKeyPrefix
log_file_validation_enabled:
description: Whether log file validation is enabled on the trail
returned: success
type: bool
sample: true
include_global_service_events:
description: Whether global services (IAM, STS) are logged with this trail
returned: success
type: bool
sample: true
is_multi_region_trail:
description: Whether the trail applies to all regions or just one
returned: success
type: bool
sample: true
has_custom_event_selectors:
description: Whether any custom event selectors are used for this trail.
returned: success
type: bool
sample: False
home_region:
description: The home region where the trail was originally created and must be edited.
returned: success
type: string
sample: us-east-1
sns_topic_name:
description: The SNS topic name where log delivery notifications are sent.
returned: success when present
type: string
sample: myTopic
sns_topic_arn:
description: Full ARN of the SNS topic where log delivery notifications are sent.
returned: success when present
type: string
sample: arn:aws:sns:us-east-1:123456789012:topic/myTopic
cloud_watch_logs_log_group_arn:
description: Full ARN of the CloudWatch Logs log group where events are delivered.
returned: success when present
type: string
sample: arn:aws:logs:us-east-1:123456789012:log-group:CloudTrail/DefaultLogGroup:*
cloud_watch_logs_role_arn:
description: Full ARN of the IAM role that CloudTrail assumes to deliver events.
returned: success when present
type: string
sample: arn:aws:iam::123456789012:role/CloudTrail_CloudWatchLogs_Role
kms_key_id:
description: Full ARN of the KMS Key used to encrypt log files.
returned: success when present
type: string
sample: arn:aws:kms::123456789012:key/12345678-1234-1234-1234-123456789012
tags:
description: hash/dictionary of tags applied to this resource
returned: success
type: dict
sample: {'environment': 'dev', 'Name': 'default'}
'''
import traceback
try:
from botocore.exceptions import ClientError
except ImportError:
# Handled in main() by imported HAS_BOTO3
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, ec2_argument_spec, get_aws_connection_info,
HAS_BOTO3, ansible_dict_to_boto3_tag_list,
boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict)
def create_trail(module, client, ct_params):
"""
Creates a CloudTrail
module : AnsibleModule object
client : boto3 client connection object
ct_params : The parameters for the Trail to create
"""
resp = {}
try:
resp = client.create_trail(**ct_params)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
return resp
def tag_trail(module, client, tags, trail_arn, curr_tags=None, dry_run=False):
"""
Creates, updates, removes tags on a CloudTrail resource
module : AnsibleModule object
client : boto3 client connection object
tags : Dict of tags converted from ansible_dict to boto3 list of dicts
trail_arn : The ARN of the CloudTrail to operate on
curr_tags : Dict of the current tags on resource, if any
dry_run : true/false to determine if changes will be made if needed
"""
adds = []
removes = []
updates = []
changed = False
if curr_tags is None:
# No current tags so just convert all to a tag list
adds = ansible_dict_to_boto3_tag_list(tags)
else:
curr_keys = set(curr_tags.keys())
new_keys = set(tags.keys())
add_keys = new_keys - curr_keys
remove_keys = curr_keys - new_keys
update_keys = dict()
for k in curr_keys.intersection(new_keys):
if curr_tags[k] != tags[k]:
update_keys.update({k: tags[k]})
adds = get_tag_list(add_keys, tags)
removes = get_tag_list(remove_keys, curr_tags)
updates = get_tag_list(update_keys, tags)
if removes or updates:
changed = True
if not dry_run:
try:
client.remove_tags(ResourceId=trail_arn, TagsList=removes + updates)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
if updates or adds:
changed = True
if not dry_run:
try:
client.add_tags(ResourceId=trail_arn, TagsList=updates + adds)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
return changed
def get_tag_list(keys, tags):
"""
Returns a list of dicts with tags to act on
keys : set of keys to get the values for
tags : the dict of tags to turn into a list
"""
tag_list = []
for k in keys:
tag_list.append({'Key': k, 'Value': tags[k]})
return tag_list
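# A worked illustration (added; values are made up) of the diff performed in
# tag_trail():
#   curr_tags = {'env': 'dev', 'team': 'a'}, tags = {'env': 'prod', 'owner': 'b'}
#   -> adds    = [{'Key': 'owner', 'Value': 'b'}]
#      removes = [{'Key': 'team', 'Value': 'a'}]
#      updates = [{'Key': 'env', 'Value': 'prod'}]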
def set_logging(module, client, name, action):
"""
Starts or stops logging based on given state
module : AnsibleModule object
client : boto3 client connection object
name : The name or ARN of the CloudTrail to operate on
action : start or stop
"""
if action == 'start':
try:
client.start_logging(Name=name)
return client.get_trail_status(Name=name)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
elif action == 'stop':
try:
client.stop_logging(Name=name)
return client.get_trail_status(Name=name)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
else:
module.fail_json(msg="Unsupported logging action")
def get_trail_facts(module, client, name):
"""
Describes existing trail in an account
module : AnsibleModule object
client : boto3 client connection object
name : Name of the trail
"""
# get Trail info
try:
trail_resp = client.describe_trails(trailNameList=[name])
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
# Now check to see if our trail exists and get status and tags
if len(trail_resp['trailList']):
trail = trail_resp['trailList'][0]
try:
status_resp = client.get_trail_status(Name=trail['Name'])
tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']])
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
trail['IsLogging'] = status_resp['IsLogging']
trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList'])
# Check for non-existent values and populate with None
optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId'])
for v in optional_vals - set(trail.keys()):
trail[v] = None
return trail
else:
# trail doesn't exist return None
return None
def delete_trail(module, client, trail_arn):
"""
Delete a CloudTrail
module : AnsibleModule object
client : boto3 client connection object
trail_arn : Full CloudTrail ARN
"""
try:
client.delete_trail(Name=trail_arn)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
def update_trail(module, client, ct_params):
"""
Updates an existing CloudTrail
module : AnsibleModule object
client : boto3 client connection object
ct_params : The parameters for the Trail to update
"""
try:
client.update_trail(**ct_params)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
name=dict(default='default'),
enable_logging=dict(default=True, type='bool'),
s3_bucket_name=dict(),
s3_key_prefix=dict(),
sns_topic_name=dict(),
is_multi_region_trail=dict(default=False, type='bool'),
enable_log_file_validation=dict(default=False, type='bool'),
include_global_events=dict(default=True, type='bool'),
cloudwatch_logs_role_arn=dict(),
cloudwatch_logs_log_group_arn=dict(),
kms_key_id=dict(),
tags=dict(default={}, type='dict'),
))
required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module')
# collect parameters
if module.params['state'] in ('present', 'enabled'):
state = 'present'
elif module.params['state'] in ('absent', 'disabled'):
state = 'absent'
tags = module.params['tags']
enable_logging = module.params['enable_logging']
ct_params = dict(
Name=module.params['name'],
S3BucketName=module.params['s3_bucket_name'],
IncludeGlobalServiceEvents=module.params['include_global_events'],
IsMultiRegionTrail=module.params['is_multi_region_trail'],
EnableLogFileValidation=module.params['enable_log_file_validation'],
S3KeyPrefix='',
SnsTopicName='',
CloudWatchLogsRoleArn='',
CloudWatchLogsLogGroupArn='',
KmsKeyId=''
)
if module.params['s3_key_prefix']:
ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')
if module.params['sns_topic_name']:
ct_params['SnsTopicName'] = module.params['sns_topic_name']
if module.params['cloudwatch_logs_role_arn']:
ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']
if module.params['cloudwatch_logs_log_group_arn']:
ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']
if module.params['kms_key_id']:
ct_params['KmsKeyId'] = module.params['kms_key_id']
try:
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='cloudtrail', region=region, endpoint=ec2_url, **aws_connect_params)
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
results = dict(
changed=False,
exists=False
)
# Get existing trail facts
trail = get_trail_facts(module, client, ct_params['Name'])
# If the trail exists set the result exists variable
if trail is not None:
results['exists'] = True
if state == 'absent' and results['exists']:
# If Trail exists go ahead and delete
results['changed'] = True
results['exists'] = False
results['trail'] = dict()
if not module.check_mode:
delete_trail(module, client, trail['TrailARN'])
elif state == 'present' and results['exists']:
# If Trail exists see if we need to update it
do_update = False
for key in ct_params:
tkey = str(key)
# boto3 has inconsistent parameter naming so we handle it here
if key == 'EnableLogFileValidation':
tkey = 'LogFileValidationEnabled'
# We need to make an empty string equal None
if ct_params.get(key) == '':
val = None
else:
val = ct_params.get(key)
if val != trail.get(tkey):
do_update = True
results['changed'] = True
# If we are in check mode copy the changed values to the trail facts in result output to show what would change.
if module.check_mode:
trail.update({tkey: ct_params.get(key)})
if not module.check_mode and do_update:
update_trail(module, client, ct_params)
trail = get_trail_facts(module, client, ct_params['Name'])
# Check if we need to start/stop logging
if enable_logging and not trail['IsLogging']:
results['changed'] = True
trail['IsLogging'] = True
if not module.check_mode:
set_logging(module, client, name=ct_params['Name'], action='start')
if not enable_logging and trail['IsLogging']:
results['changed'] = True
trail['IsLogging'] = False
if not module.check_mode:
set_logging(module, client, name=ct_params['Name'], action='stop')
# Check if we need to update tags on resource
tag_dry_run = False
if module.check_mode:
tag_dry_run = True
tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run)
if tags_changed:
results['changed'] = True
trail['tags'] = tags
# Populate trail facts in output
results['trail'] = camel_dict_to_snake_dict(trail)
elif state == 'present' and not results['exists']:
# Trail doesn't exist just go create it
results['changed'] = True
if not module.check_mode:
# If we aren't in check_mode then actually create it
created_trail = create_trail(module, client, ct_params)
# Apply tags
tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
# Get the trail status
try:
status_resp = client.get_trail_status(Name=created_trail['Name'])
except ClientError as err:
module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
# Set the logging state for the trail to desired value
if enable_logging and not status_resp['IsLogging']:
set_logging(module, client, name=ct_params['Name'], action='start')
if not enable_logging and status_resp['IsLogging']:
set_logging(module, client, name=ct_params['Name'], action='stop')
# Get facts for newly created Trail
trail = get_trail_facts(module, client, ct_params['Name'])
# If we are in check mode create a fake return structure for the newly minted trail
if module.check_mode:
acct_id = '123456789012'
try:
sts_client = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_params)
acct_id = sts_client.get_caller_identity()['Account']
except ClientError:
pass
trail = dict()
trail.update(ct_params)
trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
trail.pop('EnableLogFileValidation')
fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
trail['HasCustomEventSelectors'] = False
trail['HomeRegion'] = region
trail['TrailARN'] = fake_arn
trail['IsLogging'] = enable_logging
trail['tags'] = tags
# Populate trail facts in output
results['trail'] = camel_dict_to_snake_dict(trail)
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
eneldoserrata/marcos_openerp | addons/report_geraldo/lib/geraldo/site/newsite/django_1_0/tests/regressiontests/queries/models.py | 9 | 30294 | """
Various complex queries that have been problematic in the past.
"""
import datetime
import pickle
import sys
from django.db import models
from django.db.models.query import Q, ITER_CHUNK_SIZE
# Python 2.3 doesn't have sorted()
try:
sorted
except NameError:
from django.utils.itercompat import sorted
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children')
def __unicode__(self):
return self.name
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __unicode__(self):
return self.note
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
notes = models.ManyToManyField(Note)
def __unicode__(self):
return self.name
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
class Meta:
ordering = ['info']
def __unicode__(self):
return self.info
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
def __unicode__(self):
return self.name
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True, null=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __unicode__(self):
return self.name
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num', null=True)
def __unicode__(self):
return self.name
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __unicode__(self):
return '%d: %s' % (self.rank, self.author.name)
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __unicode__(self):
return self.title
class Number(models.Model):
num = models.IntegerField()
def __unicode__(self):
return unicode(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_query_set(self):
qs = super(CustomManager, self).get_query_set()
return qs.filter(public=True, tag__name='t1')
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __unicode__(self):
return self.data
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_query_set(self):
return super(MemberManager, self).get_query_set().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, primary_key=True)
parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", null=True, unique=True)
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity)
# Multiple foreign keys
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __unicode__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA)
b = models.ForeignKey(LeafB)
__test__ = {'API_TESTS':"""
>>> t1 = Tag.objects.create(name='t1')
>>> t2 = Tag.objects.create(name='t2', parent=t1)
>>> t3 = Tag.objects.create(name='t3', parent=t1)
>>> t4 = Tag.objects.create(name='t4', parent=t3)
>>> t5 = Tag.objects.create(name='t5', parent=t3)
>>> n1 = Note.objects.create(note='n1', misc='foo')
>>> n2 = Note.objects.create(note='n2', misc='bar')
>>> n3 = Note.objects.create(note='n3', misc='foo')
Create these out of order so that sorting by 'id' will be different from sorting
by 'info'. Helps detect some problems later.
>>> e2 = ExtraInfo.objects.create(info='e2', note=n2)
>>> e1 = ExtraInfo.objects.create(info='e1', note=n1)
>>> a1 = Author.objects.create(name='a1', num=1001, extra=e1)
>>> a2 = Author.objects.create(name='a2', num=2002, extra=e1)
>>> a3 = Author.objects.create(name='a3', num=3003, extra=e2)
>>> a4 = Author.objects.create(name='a4', num=4004, extra=e2)
>>> time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
>>> time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
>>> time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
>>> time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
>>> i1 = Item.objects.create(name='one', created=time1, modified=time1, creator=a1, note=n3)
>>> i1.tags = [t1, t2]
>>> i2 = Item.objects.create(name='two', created=time2, creator=a2, note=n2)
>>> i2.tags = [t1, t3]
>>> i3 = Item.objects.create(name='three', created=time3, creator=a2, note=n3)
>>> i4 = Item.objects.create(name='four', created=time4, creator=a4, note=n3)
>>> i4.tags = [t4]
>>> r1 = Report.objects.create(name='r1', creator=a1)
>>> r2 = Report.objects.create(name='r2', creator=a3)
>>> r3 = Report.objects.create(name='r3')
Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
will be rank3, rank2, rank1.
>>> rank1 = Ranking.objects.create(rank=2, author=a2)
>>> rank2 = Ranking.objects.create(rank=1, author=a3)
>>> rank3 = Ranking.objects.create(rank=3, author=a1)
>>> c1 = Cover.objects.create(title="first", item=i4)
>>> c2 = Cover.objects.create(title="second", item=i2)
>>> num1 = Number.objects.create(num=4)
>>> num2 = Number.objects.create(num=8)
>>> num3 = Number.objects.create(num=12)
Bug #1050
>>> Item.objects.filter(tags__isnull=True)
[<Item: three>]
>>> Item.objects.filter(tags__id__isnull=True)
[<Item: three>]
Bug #1801
>>> Author.objects.filter(item=i2)
[<Author: a2>]
>>> Author.objects.filter(item=i3)
[<Author: a2>]
>>> Author.objects.filter(item=i2) & Author.objects.filter(item=i3)
[<Author: a2>]
Bug #2306
Checking that no join types are "left outer" joins.
>>> query = Item.objects.filter(tags=t2).query
>>> query.LOUTER not in [x[2] for x in query.alias_map.values()]
True
>>> Item.objects.filter(Q(tags=t1)).order_by('name')
[<Item: one>, <Item: two>]
>>> Item.objects.filter(Q(tags=t1)).filter(Q(tags=t2))
[<Item: one>]
>>> Item.objects.filter(Q(tags=t1)).filter(Q(creator__name='fred')|Q(tags=t2))
[<Item: one>]
Each filter call is processed "at once" against a single table, so this is
different from the previous example as it tries to find tags that are two
things at once (rather than two tags).
>>> Item.objects.filter(Q(tags=t1) & Q(tags=t2))
[]
>>> Item.objects.filter(Q(tags=t1), Q(creator__name='fred')|Q(tags=t2))
[]
>>> qs = Author.objects.filter(ranking__rank=2, ranking__id=rank1.id)
>>> list(qs)
[<Author: a2>]
>>> qs.query.count_active_tables()
2
>>> qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=rank1.id)
>>> qs.query.count_active_tables()
3
Bug #4464
>>> Item.objects.filter(tags=t1).filter(tags=t2)
[<Item: one>]
>>> Item.objects.filter(tags__in=[t1, t2]).distinct().order_by('name')
[<Item: one>, <Item: two>]
>>> Item.objects.filter(tags__in=[t1, t2]).filter(tags=t3)
[<Item: two>]
Bug #2080, #3592
>>> Author.objects.filter(item__name='one') | Author.objects.filter(name='a3')
[<Author: a1>, <Author: a3>]
>>> Author.objects.filter(Q(item__name='one') | Q(name='a3'))
[<Author: a1>, <Author: a3>]
>>> Author.objects.filter(Q(name='a3') | Q(item__name='one'))
[<Author: a1>, <Author: a3>]
>>> Author.objects.filter(Q(item__name='three') | Q(report__name='r3'))
[<Author: a2>]
Bug #4289
A slight variation on the above theme: restricting the choices by the lookup
constraints.
>>> Number.objects.filter(num__lt=4)
[]
>>> Number.objects.filter(num__gt=8, num__lt=12)
[]
>>> Number.objects.filter(num__gt=8, num__lt=13)
[<Number: 12>]
>>> Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12))
[]
>>> Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4))
[]
>>> Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4))
[]
>>> Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4))
[<Number: 8>]
Bug #7872
Another variation on the disjunctive filtering theme.
# For the purposes of this regression test, it's important that there is no
# Join object related to the LeafA we create.
>>> LeafA.objects.create(data='first')
<LeafA: first>
>>> LeafA.objects.filter(Q(data='first')|Q(join__b__data='second'))
[<LeafA: first>]
Bug #6074
Merging two empty result sets shouldn't leave a queryset with no constraints
(which would match everything).
>>> Author.objects.filter(Q(id__in=[]))
[]
>>> Author.objects.filter(Q(id__in=[])|Q(id__in=[]))
[]
Bug #1878, #2939
>>> Item.objects.values('creator').distinct().count()
3
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
>>> xx = Item(name='four', created=time1, creator=a2, note=n1)
>>> xx.save()
>>> Item.objects.exclude(name='two').values('creator', 'name').distinct().count()
4
>>> Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name', 'foo').distinct().count()
4
>>> Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name').distinct().count()
4
>>> xx.delete()
Bug #7323
>>> Item.objects.values('creator', 'name').count()
4
Bug #2253
>>> q1 = Item.objects.order_by('name')
>>> q2 = Item.objects.filter(id=i1.id)
>>> q1
[<Item: four>, <Item: one>, <Item: three>, <Item: two>]
>>> q2
[<Item: one>]
>>> (q1 | q2).order_by('name')
[<Item: four>, <Item: one>, <Item: three>, <Item: two>]
>>> (q1 & q2).order_by('name')
[<Item: one>]
# FIXME: This is difficult to fix and very much an edge case, so punt for now.
# # This is related to the order_by() tests, below, but the old bug exhibited
# # itself here (q2 was pulling too many tables into the combined query with the
# # new ordering, but only because we have evaluated q2 already).
# >>> len((q1 & q2).order_by('name').query.tables)
# 1
>>> q1 = Item.objects.filter(tags=t1)
>>> q2 = Item.objects.filter(note=n3, tags=t2)
>>> q3 = Item.objects.filter(creator=a4)
>>> ((q1 & q2) | q3).order_by('name')
[<Item: four>, <Item: one>]
Bugs #4088, #4306
>>> Report.objects.filter(creator=1001)
[<Report: r1>]
>>> Report.objects.filter(creator__num=1001)
[<Report: r1>]
>>> Report.objects.filter(creator__id=1001)
[]
>>> Report.objects.filter(creator__id=a1.id)
[<Report: r1>]
>>> Report.objects.filter(creator__name='a1')
[<Report: r1>]
Bug #4510
>>> Author.objects.filter(report__name='r1')
[<Author: a1>]
Bug #7378
>>> a1.report_set.all()
[<Report: r1>]
Bug #5324, #6704
>>> Item.objects.filter(tags__name='t4')
[<Item: four>]
>>> Item.objects.exclude(tags__name='t4').order_by('name').distinct()
[<Item: one>, <Item: three>, <Item: two>]
>>> Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse()
[<Item: two>, <Item: three>, <Item: one>]
>>> Author.objects.exclude(item__name='one').distinct().order_by('name')
[<Author: a2>, <Author: a3>, <Author: a4>]
# Excluding across a m2m relation when there is more than one related object
# associated was problematic.
>>> Item.objects.exclude(tags__name='t1').order_by('name')
[<Item: four>, <Item: three>]
>>> Item.objects.exclude(tags__name='t1').exclude(tags__name='t4')
[<Item: three>]
# Excluding from a relation that cannot be NULL should not use outer joins.
>>> query = Item.objects.exclude(creator__in=[a1, a2]).query
>>> query.LOUTER not in [x[2] for x in query.alias_map.values()]
True
Similarly, when one of the joins cannot possibly, ever, involve NULL values
(Author -> ExtraInfo, in the following), it should never be promoted to a left
outer join. So the following query should only involve one "left outer" join
(Author -> Item is 0-to-many).
>>> qs = Author.objects.filter(id=a1.id).filter(Q(extra__note=n1)|Q(item__note=n3))
>>> len([x[2] for x in qs.query.alias_map.values() if x[2] == query.LOUTER and qs.query.alias_refcount[x[1]]])
1
The previous changes shouldn't affect nullable foreign key joins.
>>> Tag.objects.filter(parent__isnull=True).order_by('name')
[<Tag: t1>]
>>> Tag.objects.exclude(parent__isnull=True).order_by('name')
[<Tag: t2>, <Tag: t3>, <Tag: t4>, <Tag: t5>]
>>> Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name')
[<Tag: t4>, <Tag: t5>]
>>> Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name')
[<Tag: t4>, <Tag: t5>]
>>> Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name')
[<Tag: t4>, <Tag: t5>]
>>> Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name')
[<Tag: t4>, <Tag: t5>]
Bug #2091
>>> t = Tag.objects.get(name='t4')
>>> Item.objects.filter(tags__in=[t])
[<Item: four>]
Combining querysets built on different models should behave in a well-defined
fashion. We raise an error.
>>> Author.objects.all() & Tag.objects.all()
Traceback (most recent call last):
...
AssertionError: Cannot combine queries on two different base models.
>>> Author.objects.all() | Tag.objects.all()
Traceback (most recent call last):
...
AssertionError: Cannot combine queries on two different base models.
Bug #3141
>>> Author.objects.extra(select={'foo': '1'}).count()
4
>>> Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count()
4
Bug #2400
>>> Author.objects.filter(item__isnull=True)
[<Author: a3>]
>>> Tag.objects.filter(item__isnull=True)
[<Tag: t5>]
Bug #2496
>>> Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1]
[<Item: four>]
Bug #2076
# Ordering on related tables should be possible, even if the table is not
# otherwise involved.
>>> Item.objects.order_by('note__note', 'name')
[<Item: two>, <Item: four>, <Item: one>, <Item: three>]
# Ordering on a related field should use the remote model's default ordering as
# a final step.
>>> Author.objects.order_by('extra', '-name')
[<Author: a2>, <Author: a1>, <Author: a4>, <Author: a3>]
# Using remote model default ordering can span multiple models (in this case,
# Cover is ordered by Item's default, which uses Note's default).
>>> Cover.objects.all()
[<Cover: first>, <Cover: second>]
# If the remote model does not have a default ordering, we order by its 'id'
# field.
>>> Item.objects.order_by('creator', 'name')
[<Item: one>, <Item: three>, <Item: two>, <Item: four>]
# Cross model ordering is possible in Meta, too.
>>> Ranking.objects.all()
[<Ranking: 3: a1>, <Ranking: 2: a2>, <Ranking: 1: a3>]
>>> Ranking.objects.all().order_by('rank')
[<Ranking: 1: a3>, <Ranking: 2: a2>, <Ranking: 3: a1>]
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That isn't
# Django's problem. Garbage in, garbage out.
>>> Item.objects.filter(tags__isnull=False).order_by('tags', 'id')
[<Item: one>, <Item: two>, <Item: one>, <Item: two>, <Item: four>]
# If we replace the default ordering, Django adjusts the required tables
# automatically. Item normally requires a join with Note to do the default
# ordering, but that isn't needed here.
>>> qs = Item.objects.order_by('name')
>>> qs
[<Item: four>, <Item: one>, <Item: three>, <Item: two>]
>>> len(qs.query.tables)
1
# Ordering of extra() pieces is possible, too and you can mix extra fields and
# model fields in the ordering.
>>> Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank'])
[<Ranking: 1: a3>, <Ranking: 2: a2>, <Ranking: 3: a1>]
>>> qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
>>> [o.good for o in qs.extra(order_by=('-good',))] == [True, False, False]
True
>>> qs.extra(order_by=('-good', 'id'))
[<Ranking: 3: a1>, <Ranking: 2: a2>, <Ranking: 1: a3>]
# Despite having some extra aliases in the query, we can still omit them in a
# values() query.
>>> dicts = qs.values('id', 'rank').order_by('id')
>>> [sorted(d.items()) for d in dicts]
[[('id', 1), ('rank', 2)], [('id', 2), ('rank', 1)], [('id', 3), ('rank', 3)]]
Bug #7256
# An empty values() call includes all aliases, including those from an extra()
>>> dicts = qs.values().order_by('id')
>>> [sorted(d.items()) for d in dicts]
[[('author_id', 2), ('good', 0), ('id', 1), ('rank', 2)], [('author_id', 3), ('good', 0), ('id', 2), ('rank', 1)], [('author_id', 1), ('good', 1), ('id', 3), ('rank', 3)]]
Bugs #2874, #3002
>>> qs = Item.objects.select_related().order_by('note__note', 'name')
>>> list(qs)
[<Item: two>, <Item: four>, <Item: one>, <Item: three>]
# This is also a good select_related() test because there are multiple Note
# entries in the SQL. The two Note items should be different.
>>> qs[0].note, qs[0].creator.extra.note
(<Note: n2>, <Note: n1>)
Bug #3037
>>> Item.objects.filter(Q(creator__name='a3', name='two')|Q(creator__name='a4', name='four'))
[<Item: four>]
Bug #5321, #7070
Ordering columns must be included in the output columns. Note that this means
results that might otherwise be distinct are not (if there are multiple values
in the ordering cols), as in this example. This isn't a bug; it's a warning to
be careful with the selection of ordering columns.
>>> Note.objects.values('misc').distinct().order_by('note', '-misc')
[{'misc': u'foo'}, {'misc': u'bar'}, {'misc': u'foo'}]
Bug #4358
If you don't pass any fields to values(), relation fields are returned as
"foo_id" keys, not "foo". For consistency, you should be able to pass "foo_id"
in the fields list and have it work, too. We actually allow both "foo" and
"foo_id".
# The *_id version is returned by default.
>>> 'note_id' in ExtraInfo.objects.values()[0]
True
# You can also pass it in explicitly.
>>> ExtraInfo.objects.values('note_id')
[{'note_id': 1}, {'note_id': 2}]
# ...or use the field name.
>>> ExtraInfo.objects.values('note')
[{'note': 1}, {'note': 2}]
Bug #5261
>>> Note.objects.exclude(Q())
[<Note: n1>, <Note: n2>, <Note: n3>]
Bug #3045, #3288
Once upon a time, select_related() with circular relations would loop
infinitely if you forgot to specify "depth". Now we set an arbitrary default
upper bound.
>>> X.objects.all()
[]
>>> X.objects.select_related()
[]
Bug #3739
The all() method on querysets returns a copy of the queryset.
>>> q1 = Item.objects.order_by('name')
>>> id(q1) == id(q1.all())
False
Bug #2902
Parameters can be given to extra_select, *if* you use a SortedDict.
(First we need to know which order the keys fall in "naturally" on your system,
so we can put things in the wrong way around from normal. A normal dict would
thus fail.)
>>> from django.utils.datastructures import SortedDict
>>> s = [('a', '%s'), ('b', '%s')]
>>> params = ['one', 'two']
>>> if {'a': 1, 'b': 2}.keys() == ['a', 'b']:
... s.reverse()
... params.reverse()
# This slightly odd comparison works around the fact that PostgreSQL will
# return 'one' and 'two' as strings, not Unicode objects. It's a side-effect of
# using constants here and not a real concern.
>>> d = Item.objects.extra(select=SortedDict(s), select_params=params).values('a', 'b')[0]
>>> d == {'a': u'one', 'b': u'two'}
True
# Order by the number of tags attached to an item.
>>> l = Item.objects.extra(select={'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'}).order_by('-count')
>>> [o.count for o in l]
[2, 2, 1, 0]
Bug #6154
Multiple filter statements are joined using "AND" all the time.
>>> Author.objects.filter(id=a1.id).filter(Q(extra__note=n1)|Q(item__note=n3))
[<Author: a1>]
>>> Author.objects.filter(Q(extra__note=n1)|Q(item__note=n3)).filter(id=a1.id)
[<Author: a1>]
Bug #6981
>>> Tag.objects.select_related('parent').order_by('name')
[<Tag: t1>, <Tag: t2>, <Tag: t3>, <Tag: t4>, <Tag: t5>]
Bug #6180, #6203 -- dates with limits and/or counts
>>> Item.objects.count()
4
>>> Item.objects.dates('created', 'month').count()
1
>>> Item.objects.dates('created', 'day').count()
2
>>> len(Item.objects.dates('created', 'day'))
2
>>> Item.objects.dates('created', 'day')[0]
datetime.datetime(2007, 12, 19, 0, 0)
Bug #7087 -- dates with extra select columns
>>> Item.objects.dates('created', 'day').extra(select={'a': 1})
[datetime.datetime(2007, 12, 19, 0, 0), datetime.datetime(2007, 12, 20, 0, 0)]
Bug #7155 -- nullable dates
>>> Item.objects.dates('modified', 'day')
[datetime.datetime(2007, 12, 19, 0, 0)]
Test that parallel iterators work.
>>> qs = Tag.objects.all()
>>> i1, i2 = iter(qs), iter(qs)
>>> i1.next(), i1.next()
(<Tag: t1>, <Tag: t2>)
>>> i2.next(), i2.next(), i2.next()
(<Tag: t1>, <Tag: t2>, <Tag: t3>)
>>> i1.next()
<Tag: t3>
>>> qs = X.objects.all()
>>> bool(qs)
False
>>> bool(qs)
False
We can do slicing beyond what is currently in the result cache, too.
## FIXME!! This next test causes really weird PostgreSQL behaviour, but it's
## only apparent much later when the full test suite runs. I don't understand
## what's going on here yet.
##
## # We need to mess with the implementation internals a bit here to decrease the
## # cache fill size so that we don't read all the results at once.
## >>> from django.db.models import query
## >>> query.ITER_CHUNK_SIZE = 2
## >>> qs = Tag.objects.all()
##
## # Fill the cache with the first chunk.
## >>> bool(qs)
## True
## >>> len(qs._result_cache)
## 2
##
## # Query beyond the end of the cache and check that it is filled out as required.
## >>> qs[4]
## <Tag: t5>
## >>> len(qs._result_cache)
## 5
##
## # But querying beyond the end of the result set will fail.
## >>> qs[100]
## Traceback (most recent call last):
## ...
## IndexError: ...
Bug #7045 -- extra tables used to crash SQL construction on the second use.
>>> qs = Ranking.objects.extra(tables=['django_site'])
>>> s = qs.query.as_sql()
>>> s = qs.query.as_sql() # test passes if this doesn't raise an exception.
Bug #7098 -- Make sure semi-deprecated ordering by related models syntax still
works.
>>> Item.objects.values('note__note').order_by('queries_note.note', 'id')
[{'note__note': u'n2'}, {'note__note': u'n3'}, {'note__note': u'n3'}, {'note__note': u'n3'}]
Bug #7096 -- Make sure exclude() with multiple conditions continues to work.
>>> Tag.objects.filter(parent=t1, name='t3').order_by('name')
[<Tag: t3>]
>>> Tag.objects.exclude(parent=t1, name='t3').order_by('name')
[<Tag: t1>, <Tag: t2>, <Tag: t4>, <Tag: t5>]
>>> Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct()
[<Item: four>, <Item: three>, <Item: two>]
>>> Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name')
[<Item: four>, <Item: three>]
More twisted cases, involving nested negations.
>>> Item.objects.exclude(~Q(tags__name='t1', name='one'))
[<Item: one>]
>>> Item.objects.filter(~Q(tags__name='t1', name='one'), name='two')
[<Item: two>]
>>> Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two')
[<Item: four>, <Item: one>, <Item: three>]
Bug #7095
Updates that are filtered on the model being updated are somewhat tricky to get
right in MySQL. This exercises that case.
>>> mm = ManagedModel.objects.create(data='mm1', tag=t1, public=True)
>>> ManagedModel.objects.update(data='mm')
A values() or values_list() query across joined models must use outer joins
appropriately.
>>> Report.objects.values_list("creator__extra__info", flat=True).order_by("name")
[u'e1', u'e2', None]
Similarly for select_related(), joins beyond an initial nullable join must
use outer joins so that all results are included.
>>> Report.objects.select_related("creator", "creator__extra").order_by("name")
[<Report: r1>, <Report: r2>, <Report: r3>]
When there are multiple paths to a table from another table, we have to be
careful not to accidentally reuse an inappropriate join when using
select_related(). We used to return the parent's Detail record here by mistake.
>>> d1 = Detail.objects.create(data="d1")
>>> d2 = Detail.objects.create(data="d2")
>>> m1 = Member.objects.create(name="m1", details=d1)
>>> m2 = Member.objects.create(name="m2", details=d2)
>>> c1 = Child.objects.create(person=m2, parent=m1)
>>> obj = m1.children.select_related("person__details")[0]
>>> obj.person.details.data
u'd2'
Bug #7076 -- excluding shouldn't eliminate NULL entries.
>>> Item.objects.exclude(modified=time1).order_by('name')
[<Item: four>, <Item: three>, <Item: two>]
>>> Tag.objects.exclude(parent__name=t1.name)
[<Tag: t1>, <Tag: t4>, <Tag: t5>]
Bug #7181 -- ordering by related tables should accommodate nullable fields (this
test is a little tricky, since NULL ordering is database dependent. Instead, we
just count the number of results).
>>> len(Tag.objects.order_by('parent__name'))
5
Bug #7107 -- this shouldn't create an infinite loop.
>>> Valid.objects.all()
[]
Empty querysets can be merged with others.
>>> Note.objects.none() | Note.objects.all()
[<Note: n1>, <Note: n2>, <Note: n3>]
>>> Note.objects.all() | Note.objects.none()
[<Note: n1>, <Note: n2>, <Note: n3>]
>>> Note.objects.none() & Note.objects.all()
[]
>>> Note.objects.all() & Note.objects.none()
[]
Bug #7204, #7506 -- make sure querysets with related fields can be pickled. If
this doesn't crash, it's a Good Thing.
>>> out = pickle.dumps(Item.objects.all())
We should also be able to pickle things that use select_related(). The only
tricky thing here is to ensure that we do the related selections properly after
unpickling.
>>> qs = Item.objects.select_related()
>>> query = qs.query.as_sql()[0]
>>> query2 = pickle.loads(pickle.dumps(qs.query))
>>> query2.as_sql()[0] == query
True
Bug #7277
>>> ann1 = Annotation.objects.create(name='a1', tag=t1)
>>> ann1.notes.add(n1)
>>> n1.annotation_set.filter(Q(tag=t5) | Q(tag__children=t5) | Q(tag__children__children=t5))
[<Annotation: a1>]
Bug #7371
>>> Related.objects.order_by('custom')
[]
Bug #7448, #7707 -- Complex objects should be converted to strings before being
used in lookups.
>>> Item.objects.filter(created__in=[time1, time2])
[<Item: one>, <Item: two>]
Bug #7698 -- People like to slice with '0' as the high-water mark.
>>> Item.objects.all()[0:0]
[]
Bug #7411 - saving to db must work even with a partially read result set in
another cursor.
>>> for num in range(2 * ITER_CHUNK_SIZE + 1):
... _ = Number.objects.create(num=num)
>>> for i, obj in enumerate(Number.objects.all()):
... obj.save()
... if i > 10: break
Bug #7759 -- count should work with a partially read result set.
>>> count = Number.objects.count()
>>> qs = Number.objects.all()
>>> for obj in qs:
... qs.count() == count
... break
True
Bug #7791 -- there were "issues" when ordering and distinct-ing on fields
related via ForeignKeys.
>>> len(Note.objects.order_by('extrainfo__info').distinct())
3
Bug #7778 - Model subclasses could not be deleted if a nullable foreign key
relates to a model that relates back.
>>> num_celebs = Celebrity.objects.count()
>>> tvc = TvChef.objects.create(name="Huey")
>>> Celebrity.objects.count() == num_celebs + 1
True
>>> f1 = Fan.objects.create(fan_of=tvc)
>>> f2 = Fan.objects.create(fan_of=tvc)
>>> tvc.delete()
# The parent object should have been deleted as well.
>>> Celebrity.objects.count() == num_celebs
True
"""}
# In Python 2.3, exceptions raised in __len__ are swallowed (Python issue
# 1242657), so these cases return an empty list, rather than raising an
# exception. Not a lot we can do about that, unfortunately, due to the way
# Python handles list() calls internally. Thus, we skip the tests for Python
# 2.3.
if sys.version_info >= (2, 4):
__test__["API_TESTS"] += """
# If you're not careful, it's possible to introduce infinite loops via default
# ordering on foreign keys in a cycle. We detect that.
>>> LoopX.objects.all()
Traceback (most recent call last):
...
FieldError: Infinite loop caused by ordering.
>>> LoopZ.objects.all()
Traceback (most recent call last):
...
FieldError: Infinite loop caused by ordering.
# ... but you can still order in a non-recursive fashion amongst linked fields
# (the previous test failed because the default ordering was recursive).
>>> LoopX.objects.all().order_by('y__x__y__x__id')
[]
"""
| agpl-3.0 |
rooshilp/CMPUT410Lab6 | virt_env/virt1/lib/python2.7/site-packages/django/contrib/admin/filters.py | 70 | 16602 | """
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.db import models
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils.encoding import smart_text, force_text
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.admin.utils import (get_model_from_relation,
reverse_field_path, get_limit_choices_to_from_path, prepare_lookup_value)
from django.contrib.admin.options import IncorrectLookupParameters
class ListFilter(object):
title = None # Human-readable title to appear in the right sidebar.
template = 'admin/filter.html'
def __init__(self, request, params, model, model_admin):
# This dictionary will eventually contain the request's query string
# parameters actually used by this filter.
self.used_parameters = {}
if self.title is None:
raise ImproperlyConfigured(
"The list filter '%s' does not specify "
"a 'title'." % self.__class__.__name__)
def has_output(self):
"""
Returns True if some choices would be output for this filter.
"""
raise NotImplementedError('subclasses of ListFilter must provide a has_output() method')
def choices(self, cl):
"""
Returns choices ready to be output in the template.
"""
raise NotImplementedError('subclasses of ListFilter must provide a choices() method')
def queryset(self, request, queryset):
"""
Returns the filtered queryset.
"""
raise NotImplementedError('subclasses of ListFilter must provide a queryset() method')
def expected_parameters(self):
"""
Returns the list of parameter names that are expected from the
request's query string and that will be used by this filter.
"""
raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')
class SimpleListFilter(ListFilter):
# The parameter that should be used in the query string for that filter.
parameter_name = None
def __init__(self, request, params, model, model_admin):
super(SimpleListFilter, self).__init__(
request, params, model, model_admin)
if self.parameter_name is None:
raise ImproperlyConfigured(
"The list filter '%s' does not specify "
"a 'parameter_name'." % self.__class__.__name__)
if self.parameter_name in params:
value = params.pop(self.parameter_name)
self.used_parameters[self.parameter_name] = value
lookup_choices = self.lookups(request, model_admin)
if lookup_choices is None:
lookup_choices = ()
self.lookup_choices = list(lookup_choices)
def has_output(self):
return len(self.lookup_choices) > 0
def value(self):
"""
Returns the value (in string format) provided in the request's
query string for this filter, if any. If the value wasn't provided then
returns None.
"""
return self.used_parameters.get(self.parameter_name, None)
def lookups(self, request, model_admin):
"""
Must be overridden to return a list of tuples (value, verbose value)
"""
raise NotImplementedError(
'The SimpleListFilter.lookups() method must be overridden to '
'return a list of tuples (value, verbose value)')
def expected_parameters(self):
return [self.parameter_name]
def choices(self, cl):
yield {
'selected': self.value() is None,
'query_string': cl.get_query_string({}, [self.parameter_name]),
'display': _('All'),
}
for lookup, title in self.lookup_choices:
yield {
'selected': self.value() == force_text(lookup),
'query_string': cl.get_query_string({
self.parameter_name: lookup,
}, []),
'display': title,
}
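# Illustrative SimpleListFilter subclass sketch (not part of Django; the "year"
# model field and the decade choices are assumptions for illustration):
class ExampleDecadeListFilter(SimpleListFilter):
    title = _('decade')
    parameter_name = 'decade'
    def lookups(self, request, model_admin):
        # Pairs of (query-string value, human-readable title).
        return (('1980s', _('the 1980s')), ('1990s', _('the 1990s')))
    def queryset(self, request, queryset):
        # self.value() is the string from the query string, or None for "All".
        if self.value() == '1980s':
            return queryset.filter(year__gte=1980, year__lte=1989)
        if self.value() == '1990s':
            return queryset.filter(year__gte=1990, year__lte=1999)
        return queryset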
class FieldListFilter(ListFilter):
_field_list_filters = []
_take_priority_index = 0
def __init__(self, field, request, params, model, model_admin, field_path):
self.field = field
self.field_path = field_path
self.title = getattr(field, 'verbose_name', field_path)
super(FieldListFilter, self).__init__(
request, params, model, model_admin)
for p in self.expected_parameters():
if p in params:
value = params.pop(p)
self.used_parameters[p] = prepare_lookup_value(p, value)
def has_output(self):
return True
def queryset(self, request, queryset):
try:
return queryset.filter(**self.used_parameters)
except ValidationError as e:
raise IncorrectLookupParameters(e)
@classmethod
def register(cls, test, list_filter_class, take_priority=False):
if take_priority:
            # This is to allow overriding the default filters for certain types
            # of fields with some custom filters. The first match found in the
            # list takes priority.
cls._field_list_filters.insert(
cls._take_priority_index, (test, list_filter_class))
cls._take_priority_index += 1
else:
cls._field_list_filters.append((test, list_filter_class))
@classmethod
def create(cls, field, request, params, model, model_admin, field_path):
for test, list_filter_class in cls._field_list_filters:
if not test(field):
continue
return list_filter_class(field, request, params,
model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
other_model = get_model_from_relation(field)
if hasattr(field, 'rel'):
rel_name = field.rel.get_related_field().name
else:
rel_name = other_model._meta.pk.name
self.lookup_kwarg = '%s__%s__exact' % (field_path, rel_name)
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull)
self.lookup_choices = field.get_choices(include_blank=False)
super(RelatedFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
if hasattr(field, 'verbose_name'):
self.lookup_title = field.verbose_name
else:
self.lookup_title = other_model._meta.verbose_name
self.title = self.lookup_title
def has_output(self):
if (isinstance(self.field, models.related.RelatedObject) and
self.field.field.null or hasattr(self.field, 'rel') and
self.field.null):
extra = 1
else:
extra = 0
return len(self.lookup_choices) + extra > 1
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def choices(self, cl):
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
yield {
'selected': self.lookup_val is None and not self.lookup_val_isnull,
'query_string': cl.get_query_string({},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
for pk_val, val in self.lookup_choices:
yield {
'selected': self.lookup_val == smart_text(pk_val),
'query_string': cl.get_query_string({
self.lookup_kwarg: pk_val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if (isinstance(self.field, models.related.RelatedObject) and
self.field.field.null or hasattr(self.field, 'rel') and
self.field.null):
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': cl.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': EMPTY_CHANGELIST_VALUE,
}
FieldListFilter.register(lambda f: (
bool(f.rel) if hasattr(f, 'rel') else
isinstance(f, models.related.RelatedObject)), RelatedFieldListFilter)
class BooleanFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '%s__exact' % field_path
self.lookup_kwarg2 = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg, None)
self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
super(BooleanFieldListFilter, self).__init__(field,
request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg2]
def choices(self, cl):
for lookup, title in (
(None, _('All')),
('1', _('Yes')),
('0', _('No'))):
yield {
'selected': self.lookup_val == lookup and not self.lookup_val2,
'query_string': cl.get_query_string({
self.lookup_kwarg: lookup,
}, [self.lookup_kwarg2]),
'display': title,
}
if isinstance(self.field, models.NullBooleanField):
yield {
'selected': self.lookup_val2 == 'True',
'query_string': cl.get_query_string({
self.lookup_kwarg2: 'True',
}, [self.lookup_kwarg]),
'display': _('Unknown'),
}
FieldListFilter.register(lambda f: isinstance(f,
(models.BooleanField, models.NullBooleanField)), BooleanFieldListFilter)
class ChoicesFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '%s__exact' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
super(ChoicesFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg]
def choices(self, cl):
yield {
'selected': self.lookup_val is None,
'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
'display': _('All')
}
for lookup, title in self.field.flatchoices:
yield {
'selected': smart_text(lookup) == self.lookup_val,
'query_string': cl.get_query_string({
self.lookup_kwarg: lookup}),
'display': title,
}
FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.field_generic = '%s__' % field_path
self.date_params = dict((k, v) for k, v in params.items()
if k.startswith(self.field_generic))
now = timezone.now()
# When time zone support is enabled, convert "now" to the user's time
# zone so Django's definition of "Today" matches what the user expects.
if timezone.is_aware(now):
now = timezone.localtime(now)
if isinstance(field, models.DateTimeField):
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
else: # field is a models.DateField
today = now.date()
tomorrow = today + datetime.timedelta(days=1)
if today.month == 12:
next_month = today.replace(year=today.year + 1, month=1, day=1)
else:
next_month = today.replace(month=today.month + 1, day=1)
next_year = today.replace(year=today.year + 1, month=1, day=1)
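        # e.g. with today == 2007-12-20, next_month and next_year are both
        # 2008-01-01; for a mid-year date only next_month's month is bumped.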
self.lookup_kwarg_since = '%s__gte' % field_path
self.lookup_kwarg_until = '%s__lt' % field_path
self.links = (
(_('Any date'), {}),
(_('Today'), {
self.lookup_kwarg_since: str(today),
self.lookup_kwarg_until: str(tomorrow),
}),
(_('Past 7 days'), {
self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
self.lookup_kwarg_until: str(tomorrow),
}),
(_('This month'), {
self.lookup_kwarg_since: str(today.replace(day=1)),
self.lookup_kwarg_until: str(next_month),
}),
(_('This year'), {
self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
self.lookup_kwarg_until: str(next_year),
}),
)
super(DateFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg_since, self.lookup_kwarg_until]
def choices(self, cl):
for title, param_dict in self.links:
yield {
'selected': self.date_params == param_dict,
'query_string': cl.get_query_string(
param_dict, [self.field_generic]),
'display': title,
}
FieldListFilter.register(
lambda f: isinstance(f, models.DateField), DateFieldListFilter)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = field_path
self.lookup_kwarg_isnull = '%s__isnull' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg, None)
self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull,
None)
parent_model, reverse_path = reverse_field_path(model, field_path)
queryset = parent_model._default_manager.all()
        # optional feature: limit choices based on existing relationships
# queryset = queryset.complex_filter(
# {'%s__isnull' % reverse_path: False})
limit_choices_to = get_limit_choices_to_from_path(model, field_path)
queryset = queryset.filter(limit_choices_to)
self.lookup_choices = (queryset
.distinct()
.order_by(field.name)
.values_list(field.name, flat=True))
super(AllValuesFieldListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg, self.lookup_kwarg_isnull]
def choices(self, cl):
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
yield {
'selected': (self.lookup_val is None
and self.lookup_val_isnull is None),
'query_string': cl.get_query_string({},
[self.lookup_kwarg, self.lookup_kwarg_isnull]),
'display': _('All'),
}
include_none = False
for val in self.lookup_choices:
if val is None:
include_none = True
continue
val = smart_text(val)
yield {
'selected': self.lookup_val == val,
'query_string': cl.get_query_string({
self.lookup_kwarg: val,
}, [self.lookup_kwarg_isnull]),
'display': val,
}
if include_none:
yield {
'selected': bool(self.lookup_val_isnull),
'query_string': cl.get_query_string({
self.lookup_kwarg_isnull: 'True',
}, [self.lookup_kwarg]),
'display': EMPTY_CHANGELIST_VALUE,
}
FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
| apache-2.0 |
jaysonsantos/servo | etc/ci/performance/test_differ.py | 77 | 1744 | #!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import json
parser = argparse.ArgumentParser(description="Diff between two runs of performance tests.")
parser.add_argument("file1", help="the first output json from runner")
parser.add_argument("file2", help="the second output json from runner")
args = parser.parse_args()
def load_data(filename):
with open(filename, 'r') as f:
results = {}
totals = {}
counts = {}
records = json.load(f)
for record in records:
key = record.get('testcase')
value = record.get('domComplete') - record.get('domLoading')
            totals[key] = totals.get(key, 0) + value
            counts[key] = counts.get(key, 0) + 1
results[key] = round(totals[key] / counts[key])
return results
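# Sketch of the runner JSON that load_data() expects (field names taken from
# the record.get() calls above; the concrete values are invented):
#     [{"testcase": "about:blank", "domLoading": 100, "domComplete": 160},
#      {"testcase": "about:blank", "domLoading": 110, "domComplete": 150}]
# For each testcase the (domComplete - domLoading) durations are averaged.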
data1 = load_data(args.file1)
data2 = load_data(args.file2)
keys = set(data1.keys()).union(data2.keys())
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
END = '\033[0m'
for key in keys:
value1 = data1.get(key)
value2 = data2.get(key)
if value1 and not(value2):
print ("{}Test {}: missing from {}.{}".format(WARNING, key, args.file2, END))
elif value2 and not(value1):
print ("{}Test {}: missing from {}.{}".format(WARNING, key, args.file1, END))
elif value1 and value2:
diff = value2 - value1
change = diff / value1
color = BLUE if value1 <= value2 else GREEN
print("{}{:6} {:6} {:+6} {:+8.2%} {}.{}".format(color, value1, value2, diff, change, key, END))
| mpl-2.0 |
Gadal/sympy | sympy/utilities/tests/test_codegen_octave.py | 4 | 17828 | from sympy.core import (S, symbols, Eq, pi, Catalan, EulerGamma, Lambda,
Dummy, Function)
from sympy.core.compatibility import StringIO
from sympy import erf, Integral, Piecewise
from sympy import Equality
from sympy.matrices import Matrix, MatrixSymbol
from sympy.printing.codeprinter import Assignment
from sympy.utilities.codegen import OctaveCodeGen, codegen, make_routine
from sympy.utilities.pytest import raises
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import XFAIL
import sympy
x, y, z = symbols('x,y,z')
def test_empty_m_code():
code_gen = OctaveCodeGen()
output = StringIO()
code_gen.dump_m([], output, "file", header=False, empty=False)
source = output.getvalue()
assert source == ""
def test_m_simple_code():
name_expr = ("test", (x + y)*z)
result, = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0] == "test.m"
source = result[1]
expected = (
"function out1 = test(x, y, z)\n"
" out1 = z.*(x + y);\n"
"end\n"
)
assert source == expected
def test_m_simple_code_with_header():
name_expr = ("test", (x + y)*z)
result, = codegen(name_expr, "Octave", header=True, empty=False)
assert result[0] == "test.m"
source = result[1]
expected = (
"function out1 = test(x, y, z)\n"
" %TEST Autogenerated by sympy\n"
" % Code generated with sympy " + sympy.__version__ + "\n"
" %\n"
" % See http://www.sympy.org/ for more information.\n"
" %\n"
" % This file is part of 'project'\n"
" out1 = z.*(x + y);\n"
"end\n"
)
assert source == expected
def test_m_simple_code_nameout():
expr = Equality(z, (x + y))
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function z = test(x, y)\n"
" z = x + y;\n"
"end\n"
)
assert source == expected
def test_m_numbersymbol():
name_expr = ("test", pi**Catalan)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = test()\n"
" out1 = pi^0.915965594177219;\n"
"end\n"
)
assert source == expected
@XFAIL
def test_m_numbersymbol_no_inline():
# FIXME: how to pass inline=False to the OctaveCodePrinter?
name_expr = ("test", [pi**Catalan, EulerGamma])
result, = codegen(name_expr, "Octave", header=False,
empty=False, inline=False)
source = result[1]
expected = (
"function [out1, out2] = test()\n"
" Catalan = 0.915965594177219; % constant\n"
" EulerGamma = 0.5772156649015329; % constant\n"
" out1 = pi^Catalan;\n"
" out2 = EulerGamma;\n"
"end\n"
)
assert source == expected
def test_m_code_argument_order():
expr = x + y
routine = make_routine("test", expr, argument_sequence=[z, x, y], language="octave")
code_gen = OctaveCodeGen()
output = StringIO()
code_gen.dump_m([routine], output, "test", header=False, empty=False)
source = output.getvalue()
expected = (
"function out1 = test(z, x, y)\n"
" out1 = x + y;\n"
"end\n"
)
assert source == expected
def test_multiple_results_m():
# Here the output order is the input order
expr1 = (x + y)*z
expr2 = (x - y)*z
name_expr = ("test", [expr1, expr2])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2] = test(x, y, z)\n"
" out1 = z.*(x + y);\n"
" out2 = z.*(x - y);\n"
"end\n"
)
assert source == expected
def test_results_named_unordered():
# Here output order is based on name_expr
A, B, C = symbols('A,B,C')
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, (x - y)*z)
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [C, A, B] = test(x, y, z)\n"
" C = z.*(x + y);\n"
" A = z.*(x - y);\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_results_named_ordered():
A, B, C = symbols('A,B,C')
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, (x - y)*z)
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result = codegen(name_expr, "Octave", header=False, empty=False,
argument_sequence=(x, z, y))
assert result[0][0] == "test.m"
source = result[0][1]
expected = (
"function [C, A, B] = test(x, z, y)\n"
" C = z.*(x + y);\n"
" A = z.*(x - y);\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_complicated_m_codegen():
from sympy import sin, cos, tan
name_expr = ("testlong",
[ ((sin(x) + cos(y) + tan(z))**3).expand(),
cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))))
])
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "testlong.m"
source = result[0][1]
expected = (
"function [out1, out2] = testlong(x, y, z)\n"
" out1 = sin(x).^3 + 3*sin(x).^2.*cos(y) + 3*sin(x).^2.*tan(z)"
" + 3*sin(x).*cos(y).^2 + 6*sin(x).*cos(y).*tan(z) + 3*sin(x).*tan(z).^2"
" + cos(y).^3 + 3*cos(y).^2.*tan(z) + 3*cos(y).*tan(z).^2 + tan(z).^3;\n"
" out2 = cos(cos(cos(cos(cos(cos(cos(cos(x + y + z))))))));\n"
"end\n"
)
assert source == expected
def test_m_output_arg_mixed_unordered():
    # named outputs are alphabetical, unnamed outputs appear in the given order
from sympy import sin, cos, tan
a = symbols("a")
name_expr = ("foo", [cos(2*x), Equality(y, sin(x)), cos(x), Equality(a, sin(2*x))])
result, = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0] == "foo.m"
    source = result[1]
expected = (
'function [out1, y, out3, a] = foo(x)\n'
' out1 = cos(2*x);\n'
' y = sin(x);\n'
' out3 = cos(x);\n'
' a = sin(2*x);\n'
'end\n'
)
assert source == expected
def test_m_piecewise_():
pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True))
name_expr = ("pwtest", pw)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = pwtest(x)\n"
" out1 = ((x < -1).*(0) + (~(x < -1)).*( ...\n"
" (x <= 1).*(x.^2) + (~(x <= 1)).*( ...\n"
" (x > 1).*(-x + 2) + (~(x > 1)).*(1))));\n"
"end\n"
)
assert source == expected
@XFAIL
def test_m_piecewise_no_inline():
# FIXME: how to pass inline=False to the OctaveCodePrinter?
pw = Piecewise((0, x < -1), (x**2, x <= 1), (-x+2, x > 1), (1, True))
name_expr = ("pwtest", pw)
result, = codegen(name_expr, "Octave", header=False, empty=False,
inline=False)
source = result[1]
expected = (
"function out1 = pwtest(x)\n"
" if (x < -1)\n"
" out1 = 0;\n"
" elseif (x <= 1)\n"
" out1 = x.^2;\n"
" elseif (x > 1)\n"
" out1 = -x + 2;\n"
" else\n"
" out1 = 1;\n"
" end\n"
"end\n"
)
assert source == expected
def test_m_multifcns_per_file():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "foo.m"
    source = result[0][1]
expected = (
"function [out1, out2] = foo(x, y)\n"
" out1 = 2*x;\n"
" out2 = 3*y;\n"
"end\n"
"function [out1, out2] = bar(y)\n"
" out1 = y.^2;\n"
" out2 = 4*y;\n"
"end\n"
)
assert source == expected
def test_m_multifcns_per_file_w_header():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
result = codegen(name_expr, "Octave", header=True, empty=False)
assert result[0][0] == "foo.m"
    source = result[0][1]
expected = (
"function [out1, out2] = foo(x, y)\n"
" %FOO Autogenerated by sympy\n"
" % Code generated with sympy " + sympy.__version__ + "\n"
" %\n"
" % See http://www.sympy.org/ for more information.\n"
" %\n"
" % This file is part of 'project'\n"
" out1 = 2*x;\n"
" out2 = 3*y;\n"
"end\n"
"function [out1, out2] = bar(y)\n"
" out1 = y.^2;\n"
" out2 = 4*y;\n"
"end\n"
)
assert source == expected
def test_m_filename_match_first_fcn():
name_expr = [ ("foo", [2*x, 3*y]), ("bar", [y**2, 4*y]) ]
raises(ValueError, lambda: codegen(name_expr,
"Octave", prefix="bar", header=False, empty=False))
def test_m_matrix_named():
e2 = Matrix([[x, 2*y, pi*z]])
name_expr = ("test", Equality(MatrixSymbol('myout1', 1, 3), e2))
result = codegen(name_expr, "Octave", header=False, empty=False)
assert result[0][0] == "test.m"
source = result[0][1]
expected = (
"function myout1 = test(x, y, z)\n"
" myout1 = [x 2*y pi*z];\n"
"end\n"
)
assert source == expected
def test_m_matrix_named_matsym():
myout1 = MatrixSymbol('myout1', 1, 3)
e2 = Matrix([[x, 2*y, pi*z]])
name_expr = ("test", Equality(myout1, e2, evaluate=False))
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function myout1 = test(x, y, z)\n"
" myout1 = [x 2*y pi*z];\n"
"end\n"
)
assert source == expected
def test_m_matrix_output_autoname():
expr = Matrix([[x, x+y, 3]])
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function out1 = test(x, y)\n"
" out1 = [x x + y 3];\n"
"end\n"
)
assert source == expected
def test_m_matrix_output_autoname_2():
e1 = (x + y)
e2 = Matrix([[2*x, 2*y, 2*z]])
e3 = Matrix([[x], [y], [z]])
e4 = Matrix([[x, y], [z, 16]])
name_expr = ("test", (e1, e2, e3, e4))
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2, out3, out4] = test(x, y, z)\n"
" out1 = x + y;\n"
" out2 = [2*x 2*y 2*z];\n"
" out3 = [x; y; z];\n"
" out4 = [x y;\n"
" z 16];\n"
"end\n"
)
assert source == expected
def test_m_results_matrix_named_ordered():
B, C = symbols('B,C')
A = MatrixSymbol('A', 1, 3)
expr1 = Equality(C, (x + y)*z)
expr2 = Equality(A, Matrix([[1, 2, x]]))
expr3 = Equality(B, 2*x)
name_expr = ("test", [expr1, expr2, expr3])
result, = codegen(name_expr, "Octave", header=False, empty=False,
argument_sequence=(x, z, y))
source = result[1]
expected = (
"function [C, A, B] = test(x, z, y)\n"
" C = z.*(x + y);\n"
" A = [1 2 x];\n"
" B = 2*x;\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice():
A = MatrixSymbol('A', 2, 3)
B = MatrixSymbol('B', 1, 3)
C = MatrixSymbol('C', 1, 3)
D = MatrixSymbol('D', 2, 1)
name_expr = ("test", [Equality(B, A[0, :]),
Equality(C, A[1, :]),
Equality(D, A[:, 2])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C, D] = test(A)\n"
" B = A(1, :);\n"
" C = A(2, :);\n"
" D = A(:, 3);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice2():
A = MatrixSymbol('A', 3, 4)
B = MatrixSymbol('B', 2, 2)
C = MatrixSymbol('C', 2, 2)
name_expr = ("test", [Equality(B, A[0:2, 0:2]),
Equality(C, A[0:2, 1:3])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C] = test(A)\n"
" B = A(1:2, 1:2);\n"
" C = A(1:2, 2:3);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice3():
A = MatrixSymbol('A', 8, 7)
B = MatrixSymbol('B', 2, 2)
C = MatrixSymbol('C', 4, 2)
name_expr = ("test", [Equality(B, A[6:, 1::3]),
Equality(C, A[::2, ::3])])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, C] = test(A)\n"
" B = A(7:end, 2:3:end);\n"
" C = A(1:2:end, 1:3:end);\n"
"end\n"
)
assert source == expected
def test_m_matrixsymbol_slice_autoname():
A = MatrixSymbol('A', 2, 3)
B = MatrixSymbol('B', 1, 3)
name_expr = ("test", [Equality(B, A[0,:]), A[1,:], A[:,0], A[:,1]])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [B, out2, out3, out4] = test(A)\n"
" B = A(1, :);\n"
" out2 = A(2, :);\n"
" out3 = A(:, 1);\n"
" out4 = A(:, 2);\n"
"end\n"
)
assert source == expected
def test_m_loops():
# Note: an Octave programmer would probably vectorize this across one or
# more dimensions. Also, size(A) would be used rather than passing in m
# and n. Perhaps users would expect us to vectorize automatically here?
# Or is it possible to represent such things using IndexedBase?
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
result, = codegen(('mat_vec_mult', Eq(y[i], A[i, j]*x[j])), "Octave",
header=False, empty=False)
source = result[1]
expected = (
'function y = mat_vec_mult(A, m, n, x)\n'
' for i = 1:m\n'
' y(i) = 0;\n'
' end\n'
' for i = 1:m\n'
' for j = 1:n\n'
' y(i) = %(rhs)s + y(i);\n'
' end\n'
' end\n'
'end\n'
)
assert (source == expected % {'rhs': 'A(%s, %s).*x(j)' % (i, j)} or
source == expected % {'rhs': 'x(j).*A(%s, %s)' % (i, j)})
def test_m_tensor_loops_multiple_contractions():
# see comments in previous test about vectorizing
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
A = IndexedBase('A')
B = IndexedBase('B')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
result, = codegen(('tensorthing', Eq(y[i], B[j, k, l]*A[i, j, k, l])),
"Octave", header=False, empty=False)
source = result[1]
expected = (
'function y = tensorthing(A, B, m, n, o, p)\n'
' for i = 1:m\n'
' y(i) = 0;\n'
' end\n'
' for i = 1:m\n'
' for j = 1:n\n'
' for k = 1:o\n'
' for l = 1:p\n'
' y(i) = y(i) + B(j, k, l).*A(i, j, k, l);\n'
' end\n'
' end\n'
' end\n'
' end\n'
'end\n'
)
assert source == expected
def test_m_InOutArgument():
expr = Equality(x, x**2)
name_expr = ("mysqr", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function x = mysqr(x)\n"
" x = x.^2;\n"
"end\n"
)
assert source == expected
def test_m_InOutArgument_order():
# can specify the order as (x, y)
expr = Equality(x, x**2 + y)
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False,
empty=False, argument_sequence=(x,y))
source = result[1]
expected = (
"function x = test(x, y)\n"
" x = x.^2 + y;\n"
"end\n"
)
assert source == expected
# make sure it gives (x, y) not (y, x)
expr = Equality(x, x**2 + y)
name_expr = ("test", expr)
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function x = test(x, y)\n"
" x = x.^2 + y;\n"
"end\n"
)
assert source == expected
def test_m_not_supported():
f = Function('f')
name_expr = ("test", [f(x).diff(x), S.ComplexInfinity])
result, = codegen(name_expr, "Octave", header=False, empty=False)
source = result[1]
expected = (
"function [out1, out2] = test(x)\n"
" % unsupported: Derivative(f(x), x)\n"
" % unsupported: zoo\n"
" out1 = Derivative(f(x), x);\n"
" out2 = zoo;\n"
"end\n"
)
assert source == expected
def test_global_vars_octave():
x, y, z, t = symbols("x y z t")
result = codegen(('f', x*y), "Octave", header=False, empty=False,
global_vars=(y,))
source = result[0][1]
expected = (
"function out1 = f(x)\n"
" out1 = x.*y;\n"
"end\n"
)
assert source == expected
result = codegen(('f', x*y+z), "Octave", header=False, empty=False,
argument_sequence=(x, y), global_vars=(z, t))
source = result[0][1]
expected = (
"function out1 = f(x, y)\n"
" out1 = x.*y + z;\n"
"end\n"
)
assert source == expected
| bsd-3-clause |
BIT-SYS/gem5-spm-module | src/mem/slicc/main.py | 83 | 3608 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from slicc.parser import SLICC
usage="%prog [options] <files> ... "
version="%prog v0.4"
brief_copyright='''
Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
Copyright (c) 2009 The Hewlett-Packard Development Company
All Rights Reserved.
'''
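# Typical invocation sketch (option names as registered in main() below; the
# protocol file name is an assumption for illustration):
#     python main.py -C generated -H html MESI.slicc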
def nprint(format, *args):
pass
def eprint(format, *args):
if args:
format = format % args
print >>sys.stderr, format
def main(args=None):
import optparse
parser = optparse.OptionParser(usage=usage, version=version,
description=brief_copyright)
parser.add_option("-d", "--debug", default=False, action="store_true",
help="Turn on PLY debugging")
parser.add_option("-C", "--code-path", default="generated",
help="Path where C++ code output code goes")
parser.add_option("-H", "--html-path",
help="Path where html output goes")
parser.add_option("-F", "--print-files",
help="Print files that SLICC will generate")
parser.add_option("--tb", "--traceback", action='store_true',
help="print traceback on error")
parser.add_option("-q", "--quiet",
help="don't print messages")
opts,files = parser.parse_args(args=args)
if len(files) != 1:
parser.print_help()
sys.exit(2)
output = nprint if opts.quiet else eprint
output("SLICC v0.4")
output("Parsing...")
slicc = SLICC(files[0], verbose=True, debug=opts.debug, traceback=opts.tb)
if opts.print_files:
for i in sorted(slicc.files()):
print ' %s' % i
else:
output("Processing AST...")
slicc.process()
output("Writing C++ files...")
slicc.writeCodeFiles(opts.code_path)
if opts.html_path:
output("Writing HTML files...")
slicc.writeHTMLFiles(opts.html_path)
output("SLICC is Done.")
if __name__ == "__main__":
main()
| bsd-3-clause |
coursemdetw/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/linecache.py | 785 | 3864 | """Cache lines from files.
This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.
"""
import sys
import os
import tokenize
__all__ = ["getline", "clearcache", "checkcache"]
def getline(filename, lineno, module_globals=None):
lines = getlines(filename, module_globals)
if 1 <= lineno <= len(lines):
return lines[lineno-1]
else:
return ''
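# Usage sketch (the path is an assumption for illustration):
#     import linecache
#     first = linecache.getline('/usr/lib/python3.3/os.py', 1)  # read via cache
#     linecache.checkcache()  # drop entries whose files changed on disk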
# The cache
cache = {} # The cache
def clearcache():
"""Clear the cache entirely."""
global cache
cache = {}
def getlines(filename, module_globals=None):
"""Get the lines for a file from the cache.
Update the cache if it doesn't contain an entry for this file already."""
if filename in cache:
return cache[filename][2]
else:
return updatecache(filename, module_globals)
def checkcache(filename=None):
"""Discard cache entries that are out of date.
(This is not checked upon each call!)"""
if filename is None:
filenames = list(cache.keys())
else:
if filename in cache:
filenames = [filename]
else:
return
for filename in filenames:
size, mtime, lines, fullname = cache[filename]
if mtime is None:
continue # no-op for files loaded via a __loader__
try:
stat = os.stat(fullname)
except os.error:
del cache[filename]
continue
if size != stat.st_size or mtime != stat.st_mtime:
del cache[filename]
def updatecache(filename, module_globals=None):
"""Update a cache entry and return its list of lines.
If something's wrong, print a message, discard the cache entry,
and return an empty list."""
if filename in cache:
del cache[filename]
if not filename or (filename.startswith('<') and filename.endswith('>')):
return []
fullname = filename
try:
stat = os.stat(fullname)
except OSError:
basename = filename
# Try for a __loader__, if available
if module_globals and '__loader__' in module_globals:
name = module_globals.get('__name__')
loader = module_globals['__loader__']
get_source = getattr(loader, 'get_source', None)
if name and get_source:
try:
data = get_source(name)
except (ImportError, IOError):
pass
else:
if data is None:
# No luck, the PEP302 loader cannot find the source
# for this module.
return []
cache[filename] = (
len(data), None,
[line+'\n' for line in data.splitlines()], fullname
)
return cache[filename][2]
# Try looking through the module search path, which is only useful
# when handling a relative filename.
if os.path.isabs(filename):
return []
for dirname in sys.path:
try:
fullname = os.path.join(dirname, basename)
except (TypeError, AttributeError):
# Not sufficiently string-like to do anything useful with.
continue
try:
stat = os.stat(fullname)
break
except os.error:
pass
else:
return []
try:
with tokenize.open(fullname) as fp:
lines = fp.readlines()
except IOError:
return []
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
size, mtime = stat.st_size, stat.st_mtime
cache[filename] = size, mtime, lines, fullname
return lines
| agpl-3.0 |
kmdm/sspkd | src/sspkd/pull.py | 1 | 1076 | import os
import subprocess
from conf import SspkdConfig
from errors import *
from util import SspkdUtil
class SspkdPull(object):
def __init__(self):
self.cf = SspkdConfig()
self.util = SspkdUtil()
def _pull(self):
if self.cf.server.enabled:
with open(self.cf.server.database, "r") as f:
return f.read()
cmdline = [
'ssh', '-o IdentityFile=%s' % self.cf.client.pushkey,
'-o IdentitiesOnly=yes',
self.cf.client.keyserver, 'fetch'
]
ssh = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = ssh.communicate()
return out
def pull(self, install=False, verify=True):
signed_keys = self._pull()
if verify:
sshkeys = self.util.verify_keys(signed_keys)
if install:
self.util.install_keys(sshkeys)
else:
sshkeys = signed_keys
return sshkeys
| gpl-3.0 |
mortentoo/maya | scripts/usability/replace_maya_shader_with_redshift/replace_maya_shader_with_redshift.py | 1 | 2493 | '''
Takes selected materials and replaces them with Redshift materials imported from shaderFilePath.
Finds the gammaCorrect node attached to the Redshift material's "diffuse_color" attribute, and assigns it the color value of the selected material(s). If it does not find a gammaCorrect node, it simply assigns the color of the material to the diffuse color of the Redshift material.
Finally, it assigns the Redshift material to the objects the material was assigned to.
'''
# import packages
import pymel.core as pm
import os
def maya2RedshiftShader():
# choose shader file to import
shaderFilePath = pm.fileDialog2(caption='Import Redshift shader', dialogStyle=2, fileMode=1, fileFilter='Maya Files (*.ma *.mb);;Maya ASCII (*.ma);;Maya Binary (*.mb)')
if not shaderFilePath:
return
shaderFilePath = shaderFilePath[0]
# use filename as namespace
filename = os.path.basename(shaderFilePath)
namespace = os.path.splitext(filename)[0]
# get selected materials
selectedMaterials = pm.ls(orderedSelection=True, materials=True)
redshiftNodes = []
# for each material...
for material in selectedMaterials:
# import shader
nodes = pm.system.importFile(shaderFilePath, returnNewNodes=True, defaultNamespace=False, namespace=namespace)
# find RedshiftMaterial node in imported nodes
redshiftNode = None
for node in nodes:
if pm.nodeType(node) == 'RedshiftMaterial':
redshiftNode = node
redshiftNodes.append(redshiftNode)
break
# get color from original material and assign it to the gammaCorrect node - or to the diffuse color if no gammaCorrect node is attached
colorValue = pm.getAttr(material + '.color')
# get connected node, the gammaCorrect node
gammaCorrectNode = pm.listConnections(redshiftNode + '.diffuse_color', destination=False, source=True)[0]
if gammaCorrectNode:
if pm.nodeType(gammaCorrectNode) == 'gammaCorrect':
pm.setAttr(gammaCorrectNode + '.value', colorValue, type='double3')
else:
pm.setAttr(redshiftNode + '.diffuse_color', colorValue, type='double3')
# get objects with material and assign redshift material to them
sg = material.shadingGroups()
setMembers = pm.sets(sg, q=True)
pm.select(setMembers, r=True)
if pm.ls(sl=True):
pm.hyperShade(assign=redshiftNode)
pm.select(pm.hyperShade(objects=material), r=True)
if pm.ls(sl=True):
pm.hyperShade(assign=redshiftNode)
# select the imported redshift nodes
pm.select(redshiftNodes, r=True)
maya2RedshiftShader()
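# Usage sketch (assumptions: run inside Maya with the target materials
# selected, and the chosen .ma/.mb file contains a single RedshiftMaterial):
#
#     import replace_maya_shader_with_redshift  # importing runs the replacement
#
# The module calls maya2RedshiftShader() at import time (see above), which
# prompts for the shader file and then rewires the shading assignments.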
| gpl-3.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Lib/distutils/command/build_clib.py | 176 | 8131 | """distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
__revision__ = "$Id$"
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os
from distutils.core import Command
from distutils.errors import DistutilsSetupError
from distutils.sysconfig import customize_compiler
from distutils import log
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build_clib(Command):
description = "build C/C++ libraries used by Python extensions"
user_options = [
('build-clib=', 'b',
"directory to build C/C++ libraries to"),
('build-temp=', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
def finalize_options(self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options('build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
self.libraries = self.distribution.libraries
if self.libraries:
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# XXX same as for build_ext -- what about 'self.define' and
# 'self.undef' ?
def run(self):
if not self.libraries:
return
# Yech -- this is cut 'n pasted from build_ext.py!
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
def check_library_list(self, libraries):
"""Ensure that the list of libraries is valid.
`library` is presumably provided as a command option 'libraries'.
This method checks that it is a list of 2-tuples, where the tuples
are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(libraries, list):
raise DistutilsSetupError, \
"'libraries' option must be a list of tuples"
for lib in libraries:
if not isinstance(lib, tuple) or len(lib) != 2:
raise DistutilsSetupError, \
"each element of 'libraries' must be a 2-tuple"
name, build_info = lib
if not isinstance(name, str):
raise DistutilsSetupError, \
"first element of each tuple in 'libraries' " + \
"must be a string (the library name)"
if '/' in name or (os.sep != '/' and os.sep in name):
raise DistutilsSetupError, \
("bad library name '%s': " +
"may not contain directory separators") % \
lib[0]
if not isinstance(build_info, dict):
raise DistutilsSetupError, \
"second element of each tuple in 'libraries' " + \
"must be a dictionary (build info)"
def get_library_names(self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for (lib_name, build_info) in self.libraries:
lib_names.append(lib_name)
return lib_names
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for (lib_name, build_info) in self.libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames") % lib_name
filenames.extend(sources)
return filenames
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError, \
("in 'libraries' option (library '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % lib_name
sources = list(sources)
log.info("building '%s' library", lib_name)
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
| mit |
yasserglez/tagfs | packages/tagfs/contrib/django/utils/simplejson/scanner.py | 928 | 2227 | """JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
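# For illustration: NUMBER_RE splits a JSON number into (int, frac, exp)
# groups, e.g. '-12.5e3' matches as ('-12', '.5', 'e3') and '42' as
# ('42', None, None); _scan_once below recombines whichever parts are present.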
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
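# Contract sketch: the decoder builds a context object and calls
# make_scanner(context); the returned _scan_once(string, idx) yields a
# (value, end_index) pair, e.g. scanning 'true' at idx 0 gives (True, 4).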
| mit |
SMALLplayer/smallplayer-image-creator | storage/.xbmc/addons/plugin.video.youtube/YouTubePlayer.py | 2 | 16337 | '''
YouTube plugin for XBMC
Copyright (C) 2010-2012 Tobias Ussing And Henrik Mosgaard Jensen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import urllib
import cgi
try: import simplejson as json
except ImportError: import json
class YouTubePlayer():
fmt_value = {
5: "240p h263 flv container",
18: "360p h264 mp4 container | 270 for rtmpe?",
22: "720p h264 mp4 container",
26: "???",
33: "???",
34: "360p h264 flv container",
35: "480p h264 flv container",
37: "1080p h264 mp4 container",
38: "720p vp8 webm container",
43: "360p h264 flv container",
44: "480p vp8 webm container",
45: "720p vp8 webm container",
46: "520p vp8 webm stereo",
59: "480 for rtmpe",
78: "seems to be around 400 for rtmpe",
82: "360p h264 stereo",
83: "240p h264 stereo",
84: "720p h264 stereo",
85: "520p h264 stereo",
100: "360p vp8 webm stereo",
101: "480p vp8 webm stereo",
102: "720p vp8 webm stereo",
120: "hd720",
121: "hd1080"
}
# YouTube Playback Feeds
urls = {}
urls['video_stream'] = "http://www.youtube.com/watch?v=%s&safeSearch=none"
urls['embed_stream'] = "http://www.youtube.com/get_video_info?video_id=%s"
urls['video_info'] = "http://gdata.youtube.com/feeds/api/videos/%s"
def __init__(self):
self.xbmcgui = sys.modules["__main__"].xbmcgui
self.xbmcplugin = sys.modules["__main__"].xbmcplugin
self.pluginsettings = sys.modules["__main__"].pluginsettings
self.storage = sys.modules["__main__"].storage
self.settings = sys.modules["__main__"].settings
self.language = sys.modules["__main__"].language
self.dbg = sys.modules["__main__"].dbg
self.common = sys.modules["__main__"].common
self.utils = sys.modules["__main__"].utils
self.cache = sys.modules["__main__"].cache
self.core = sys.modules["__main__"].core
self.login = sys.modules["__main__"].login
self.subtitles = sys.modules["__main__"].subtitles
def playVideo(self, params={}):
self.common.log(repr(params), 3)
get = params.get
(video, status) = self.buildVideoObject(params)
if status != 200:
self.common.log(u"construct video url failed contents of video item " + repr(video))
self.utils.showErrorMessage(self.language(30603), video["apierror"], status)
return False
listitem = self.xbmcgui.ListItem(label=video['Title'], iconImage=video['thumbnail'], thumbnailImage=video['thumbnail'], path=video['video_url'])
listitem.setInfo(type='Video', infoLabels=video)
self.common.log(u"Playing video: " + repr(video['Title']) + " - " + repr(get('videoid')) + " - " + repr(video['video_url']))
self.xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=listitem)
if self.settings.getSetting("lang_code") != "0" or self.settings.getSetting("annotations") == "true":
self.common.log("BLAAAAAAAAAAAAAAAAAAAAAA: " + repr(self.settings.getSetting("lang_code")))
self.subtitles.addSubtitles(video)
if (get("watch_later") == "true" and get("playlist_entry_id")):
self.common.log(u"removing video from watch later playlist")
self.core.remove_from_watch_later(params)
self.storage.storeValue("vidstatus-" + video['videoid'], "7")
def getInfo(self, params):
get = params.get
video = self.cache.get("videoidcache" + get("videoid"))
if len(video) > 0:
self.common.log(u"returning cache ")
return (eval(video), 200)
result = self.core._fetchPage({"link": self.urls["video_info"] % get("videoid"), "api": "true"})
if result["status"] == 200:
video = self.core.getVideoInfo(result["content"], params)
if len(video) == 0:
self.common.log(u"- Couldn't parse API output, YouTube doesn't seem to know this video id?")
video = {}
video["apierror"] = self.language(30608)
return (video, 303)
else:
self.common.log(u"- Got API Error from YouTube!")
video = {}
video["apierror"] = result["content"]
return (video, 303)
video = video[0]
self.cache.set("videoidcache" + get("videoid"), repr(video))
return (video, result["status"])
def selectVideoQuality(self, params, links):
get = params.get
print "links: " + repr(type(links).__name__)
link = links.get
video_url = ""
self.common.log(u"")
if get("action") == "download":
hd_quality = int(self.settings.getSetting("hd_videos_download"))
if (hd_quality == 0):
hd_quality = int(self.settings.getSetting("hd_videos"))
else:
if (not get("quality")):
hd_quality = int(self.settings.getSetting("hd_videos"))
else:
if (get("quality") == "1080p"):
hd_quality = 3
elif (get("quality") == "720p"):
hd_quality = 2
else:
hd_quality = 1
# SD videos are default, but we go for the highest res
if (link(35)):
video_url = link(35)
elif (link(59)):
video_url = link(59)
elif link(44):
video_url = link(44)
elif (link(78)):
video_url = link(78)
elif (link(34)):
video_url = link(34)
elif (link(43)):
video_url = link(43)
elif (link(26)):
video_url = link(26)
elif (link(18)):
video_url = link(18)
elif (link(33)):
video_url = link(33)
elif (link(5)):
video_url = link(5)
if hd_quality > 1: # <-- 720p
if (link(22)):
video_url = link(22)
elif (link(45)):
video_url = link(45)
elif link(120):
video_url = link(120)
if hd_quality > 2:
if (link(37)):
video_url = link(37)
elif link(121):
video_url = link(121)
if link(38) and False:
video_url = link(38)
for fmt_key in links.iterkeys():
if link(int(fmt_key)):
if self.dbg:
text = repr(fmt_key) + " - "
if fmt_key in self.fmt_value:
text += self.fmt_value[fmt_key]
else:
text += "Unknown"
if (link(int(fmt_key)) == video_url):
text += "*"
self.common.log(text)
else:
self.common.log(u"- Missing fmt_value: " + repr(fmt_key))
if hd_quality == 0 and not get("quality"):
return self.userSelectsVideoQuality(params, links)
if not len(video_url) > 0:
self.common.log(u"- construct_video_url failed, video_url not set")
return video_url
if get("action") != "download" and video_url.find("rtmp") == -1:
video_url += '|' + urllib.urlencode({'User-Agent':self.common.USERAGENT})
self.common.log(u"Done")
return video_url
def userSelectsVideoQuality(self, params, links):
levels = [([37,121], u"1080p"),
([22,45,120], u"720p"),
([35,44], u"480p"),
([18], u"380p"),
([34,43],u"360p"),
([5],u"240p"),
([17],u"144p")]
link = links.get
quality_list = []
choices = []
for qualities, name in levels:
for quality in qualities:
if link(quality):
quality_list.append((quality, name))
break
for (quality, name) in quality_list:
choices.append(name)
dialog = self.xbmcgui.Dialog()
selected = dialog.select(self.language(30518), choices)
if selected > -1:
(quality, name) = quality_list[selected]
return link(quality)
return u""
def checkForErrors(self, video):
status = 200
if "video_url" not in video or video[u"video_url"] == u"":
status = 303
if u"apierror" not in video:
vget = video.get
if vget(u"live_play"):
video[u'apierror'] = self.language(30612)
elif vget(u"stream_map"):
video[u'apierror'] = self.language(30620)
else:
video[u'apierror'] = self.language(30618)
return (video, status)
def buildVideoObject(self, params):
self.common.log(repr(params))
(video, status) = self.getInfo(params)
if status != 200:
video[u'apierror'] = self.language(30618)
return (video, 303)
video_url = self.subtitles.getLocalFileSource(params, video)
if video_url:
video[u'video_url'] = video_url
return (video, 200)
(links, video) = self.extractVideoLinksFromYoutube(video, params)
if len(links) != 0:
video[u"video_url"] = self.selectVideoQuality(params, links)
elif "hlsvp" in video:
#hls selects the quality based on available bitrate (adaptive quality), no need to select it here
video[u"video_url"] = video[u"hlsvp"]
self.common.log("Using hlsvp url %s" % video[u"video_url"])
(video, status) = self.checkForErrors(video)
self.common.log(u"Done")
return (video, status)
def removeAdditionalEndingDelimiter(self, data):
pos = data.find("};")
if pos != -1:
self.common.log(u"found extra delimiter, removing")
data = data[:pos + 1]
return data
def extractFlashVars(self, data):
flashvars = {}
found = False
for line in data.split("\n"):
if line.strip().find(";ytplayer.config = ") > 0:
found = True
p1 = line.find(";ytplayer.config = ") + len(";ytplayer.config = ") - 1
p2 = line.rfind(";")
if p1 <= 0 or p2 <= 0:
continue
data = line[p1 + 1:p2]
break
data = self.removeAdditionalEndingDelimiter(data)
if found:
data = json.loads(data)
flashvars = data["args"]
self.common.log("Step2: " + repr(data))
self.common.log(u"flashvars: " + repr(flashvars), 2)
return flashvars
def scrapeWebPageForVideoLinks(self, result, video):
self.common.log(u"")
links = {}
flashvars = self.extractFlashVars(result[u"content"])
if not flashvars.has_key(u"url_encoded_fmt_stream_map"):
return links
if flashvars.has_key(u"ttsurl"):
video[u"ttsurl"] = flashvars[u"ttsurl"]
if flashvars.has_key(u"hlsvp"):
video[u"hlsvp"] = flashvars[u"hlsvp"]
for url_desc in flashvars[u"url_encoded_fmt_stream_map"].split(u","):
url_desc_map = cgi.parse_qs(url_desc)
self.common.log(u"url_map: " + repr(url_desc_map), 2)
if not (url_desc_map.has_key(u"url") or url_desc_map.has_key(u"stream")):
continue
key = int(url_desc_map[u"itag"][0])
url = u""
if url_desc_map.has_key(u"url"):
url = urllib.unquote(url_desc_map[u"url"][0])
elif url_desc_map.has_key(u"conn") and url_desc_map.has_key(u"stream"):
url = urllib.unquote(url_desc_map[u"conn"][0])
if url.rfind("/") < len(url) -1:
url = url + "/"
url = url + urllib.unquote(url_desc_map[u"stream"][0])
elif url_desc_map.has_key(u"stream") and not url_desc_map.has_key(u"conn"):
url = urllib.unquote(url_desc_map[u"stream"][0])
if url_desc_map.has_key(u"sig"):
url = url + u"&signature=" + url_desc_map[u"sig"][0]
elif url_desc_map.has_key(u"s"):
sig = url_desc_map[u"s"][0]
url = url + u"&signature=" + self.decrypt_signature(sig)
links[key] = url
return links
def decrypt_signature(self, s):
''' use decryption solution by Youtube-DL project '''
if len(s) == 88:
return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1] + s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12]
elif len(s) == 87:
return s[62] + s[82:62:-1] + s[83] + s[61:52:-1] + s[0] + s[51:2:-1]
elif len(s) == 86:
return s[2:63] + s[82] + s[64:82] + s[63]
elif len(s) == 85:
return s[76] + s[82:76:-1] + s[83] + s[75:60:-1] + s[0] + s[59:50:-1] + s[1] + s[49:2:-1]
elif len(s) == 84:
return s[83:36:-1] + s[2] + s[35:26:-1] + s[3] + s[25:3:-1] + s[26]
elif len(s) == 83:
return s[6] + s[3:6] + s[33] + s[7:24] + s[0] + s[25:33] + s[53] + s[34:53] + s[24] + s[54:]
elif len(s) == 82:
return s[36] + s[79:67:-1] + s[81] + s[66:40:-1] + s[33] + s[39:36:-1] + s[40] + s[35] + s[0] + s[67] + s[32:0:-1] + s[34]
elif len(s) == 81:
return s[6] + s[3:6] + s[33] + s[7:24] + s[0] + s[25:33] + s[2] + s[34:53] + s[24] + s[54:81]
elif len(s) == 92:
return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + s[80:83]
else:
self.common.log(u'Unable to decrypt signature, key length %d not supported; retrying might work' % (len(s)))
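    # For example (a sketch): with a length-86 signature s the descrambled form
    # is s[2:63] + s[82] + s[64:82] + s[63] -- a fixed permutation keyed only on
    # len(s), which is why an unsupported length just logs and returns None.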
def getVideoPageFromYoutube(self, get):
login = "false"
if self.pluginsettings.userHasProvidedValidCredentials():
login = "true"
page = self.core._fetchPage({u"link": self.urls[u"video_stream"] % get(u"videoid"), "login": login})
self.common.log("Step1: " + repr(page["content"].find("ytplayer")))
if not page:
page = {u"status":303}
return page
def isVideoAgeRestricted(self, result):
error = self.common.parseDOM(result['content'], "div", attrs={"id": "watch7-player-age-gate-content"})
self.common.log(repr(error))
return len(error) > 0
def extractVideoLinksFromYoutube(self, video, params):
self.common.log(u"trying website: " + repr(params))
get = params.get
result = self.getVideoPageFromYoutube(get)
if self.isVideoAgeRestricted(result):
self.common.log(u"Age restricted video")
if self.pluginsettings.userHasProvidedValidCredentials():
self.login._httpLogin({"new":"true"})
result = self.getVideoPageFromYoutube(get)
else:
video[u"apierror"] = self.language(30622)
if result[u"status"] != 200:
self.common.log(u"Couldn't get video page from YouTube")
return ({}, video)
links = self.scrapeWebPageForVideoLinks(result, video)
if len(links) == 0 and not( "hlsvp" in video ):
self.common.log(u"Couldn't find video url- or stream-map.")
if not u"apierror" in video:
video[u'apierror'] = self.core._findErrors(result)
self.common.log(u"Done")
return (links, video)
| gpl-2.0 |
justacec/bokeh | examples/models/colorspec.py | 7 | 2153 | from __future__ import print_function
from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle
from bokeh.models import (
Plot, DataRange1d, LinearAxis, ColumnDataSource, PanTool, WheelZoomTool
)
from bokeh.resources import INLINE
source = ColumnDataSource(
data = dict(
x = [1, 2, 3, 4, 4, 5, 5],
y = [5, 4, 3, 2, 2.1, 1, 1.1],
color = ["rgb(0, 100, 120)", "green", "blue", "#2c7fb8", "#2c7fb8", "rgba(120, 230, 150, 0.5)", "rgba(120, 230, 150, 0.5)"]
)
)
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(x_range=xdr, y_range=ydr)
circle = Circle(x="x", y="y", radius=0.2,
# Set the fill color to be dependent on the "color" field of the
# data source. If the field is missing, then the default value is
# used. Since no explicit default is provided, this picks up the
# default in FillProps, which is "gray".
fill_color="color",
# Alternative to using fill_color with rgba values, you can also use
# the fill_alpha setting to set the alpha values of your circle or
# other glyphs. This can be set as a single value or powered by a
# column in your data source. Uncomment the following line
# to see the effect.
# fill_alpha=0.2,
# An alternative form that explicitly sets a default value:
#fill_color={"default": "red", "field": "color"},
# Note that line_color is set to a fixed value. This can be any of
# the SVG named 147 colors, or a hex color string starting with "#",
# or a string "rgb(r,g,b)" or "rgba(r,g,b,a)".
# Any other string will be interpreted as a field name to look up
# on the datasource.
line_color="black")
plot.add_glyph(source, circle)
plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')
plot.add_tools(PanTool(), WheelZoomTool())
doc = Document()
doc.add_root(plot)
if __name__ == "__main__":
filename = "colorspec.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Demonstration of ColorSpec"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
terotic/digihel | digihel/tinymce.py | 1 | 2332 | import json
from django.templatetags.static import static
from wagtailtinymce.rich_text import TinyMCERichTextArea
class DigiHelTinyMCERichTextArea(TinyMCERichTextArea):
plugins = [
'anchor',
'autolink',
'charmap',
'code',
'codesample',
'contextmenu',
'definitionlist',
'directionality',
'emoticons',
'fullscreen',
'hr',
'image',
'imagetools',
'insertdatetime',
'link',
'lists',
'media',
'nonbreaking',
'pagebreak',
'paste',
'preview',
'print',
'searchreplace',
'table',
'template',
'textpattern',
'visualblocks',
'visualchars',
'wordcount',
]
style_formats = [
{
'title': 'Additional info',
'block': 'div',
'classes': 'more-info'
}
]
default_buttons = [
[
['undo', 'redo'],
['styleselect'],
['bold', 'italic'],
['bullist', 'numlist', 'ToggleDefinitionList', 'ToggleDefinitionItem', 'outdent', 'indent'],
['table'],
['link', 'unlink'],
['wagtaildoclink', 'wagtailimage', 'wagtailembed'],
['pastetext', 'fullscreen'],
['termlink'],
],
]
default_options = {
'image_advtab': True,
'browser_spellcheck': True,
'noneditable_leave_contenteditable': True,
'language': 'en',
'language_load': True,
'content_css': []
}
def build_js_init_arguments(self):
args = super(DigiHelTinyMCERichTextArea, self).build_js_init_arguments()
args.update(
plugins=self.plugins,
)
args.pop('menubar', None) # Always enable the menubar
args['content_css'].append(static('css/tinymce-content.css'))
args['skin'] = 'wagtail'
args['height'] = 600
args['style_formats'] = [
{'title': 'Additional info','block': 'section','classes': 'more-info', 'wrapper': 'true'},
{'title': 'Document link','inline': 'span','classes': 'document-link'},
]
args['style_formats_merge'] = True
args['extended_valid_elements'] = 'div[class]'
return args
| mit |
antgonza/qiime | scripts/trflp_file_to_otu_table.py | 15 | 2006 | #!/usr/bin/env python
# File created on 25 May 2010
from __future__ import division
__author__ = "Antonio Gonzalez Pena"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Antonio Gonzalez Pena", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Antonio Gonzalez Pena"
__email__ = "antgonza@gmail.com"
from os.path import isfile
from sys import stderr
from biom.table import Table
from qiime.util import (make_option, write_biom_table,
parse_command_line_parameters, get_options_lookup)
from qiime.parse import parse_trflp
options_lookup = get_options_lookup()
script_info = {}
script_info[
'brief_description'] = """Convert TRFLP text file to an OTU table"""
script_info[
'script_description'] = """The input for this script is a TRLFP text file. The output of this script is an OTU table text file that can be use with QIIME for further analysis """
script_info['script_usage'] = [
("""Usage:""", """You need to pass a TRFLP text, the script will remove not wanted chars sample and otus names, and will add zeros as need it""",
"""%prog -i trflp_in.txt -o otu_table.biom""")
]
script_info['output_description'] = ""
script_info['required_options'] = [
make_option('-i', '--input_path',
type='existing_filepath',
help='input path: TRFLP text file'),
make_option('-o', '--output_path',
type='new_filepath',
help="output file: OTU table"),
]
script_info['optional_options'] = []
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
if not isfile(opts.input_path):
raise IOError(
"Input path (%s) not valid. Does it exist?" %
opts.input_path)
samples, otus, data = parse_trflp(open(opts.input_path, 'U'))
t = Table(data, otus, samples)
write_biom_table(t, opts.output_path)
if __name__ == "__main__":
main()
| gpl-2.0 |
appliedx/edx-platform | lms/djangoapps/teams/migrations/0004_auto__add_field_courseteam_discussion_topic_id__add_field_courseteam_l.py | 46 | 6547 | # -*- coding: utf-8 -*-
import pytz
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseTeam.last_activity_at'
db.add_column('teams_courseteam', 'last_activity_at',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 17, 0, 0).replace(tzinfo=pytz.utc)),
keep_default=False)
# Adding field 'CourseTeamMembership.last_activity_at'
db.add_column('teams_courseteammembership', 'last_activity_at',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2015, 8, 17, 0, 0).replace(tzinfo=pytz.utc)),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CourseTeam.last_activity_at'
db.delete_column('teams_courseteam', 'last_activity_at')
# Deleting field 'CourseTeamMembership.last_activity_at'
db.delete_column('teams_courseteammembership', 'last_activity_at')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'teams.courseteam': {
'Meta': {'object_name': 'CourseTeam'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'discussion_topic_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'language': ('student.models.LanguageField', [], {'max_length': '16', 'blank': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'team_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'topic_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.CourseTeamMembership']", 'to': "orm['auth.User']"})
},
'teams.courseteammembership': {
'Meta': {'unique_together': "(('user', 'team'),)", 'object_name': 'CourseTeamMembership'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'membership'", 'to': "orm['teams.CourseTeam']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['teams']
| agpl-3.0 |
UManPychron/pychron | pychron/experiment/conditional/conditional.py | 1 | 22769 | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
import pprint
from traits.api import Str, Either, Int, Callable, Bool, Float, Enum, List
# ============= standard library imports ========================
from traits.trait_types import BaseStr
from uncertainties import nominal_value, std_dev
# ============= local library imports ==========================
from pychron.core.yaml import yload
from pychron.experiment.conditional.regexes import MAPPER_KEY_REGEX, \
STD_REGEX, INTERPOLATE_REGEX, EXTRACTION_STR_ABS_REGEX, EXTRACTION_STR_PERCENT_REGEX
from pychron.experiment.conditional.utilities import tokenize, get_teststr_attr_func, extract_attr
from pychron.experiment.utilities.conditionals import RUN, QUEUE, SYSTEM
from pychron.loggable import Loggable
from pychron.paths import paths
def dictgetter(d, attrs, default=None):
if not isinstance(attrs, tuple):
attrs = (attrs,)
for ai in attrs:
try:
return d[ai]
except KeyError:
pass
else:
return default
def conditionals_from_file(p, name=None, level=SYSTEM, **kw):
yd = yload(p)
cs = (('TruncationConditional', 'truncation', 'truncations'),
('ActionConditional', 'action', 'actions'),
('ActionConditional', 'action', 'post_run_actions'),
('TerminationConditional', 'termination', 'terminations'),
('TerminationConditional', 'pre_run_termination', 'pre_run_terminations'),
('TerminationConditional', 'post_run_termination', 'post_run_terminations'),
('CancelationConditional', 'cancelation', 'cancelations'))
conddict = {}
for klass, _, tag in cs:
if name and tag != name:
continue
yl = yd.get(tag)
if not yl:
continue
# print 'yyyy', yl
# var = getattr(self, '{}_conditionals'.format(var))
conds = [conditional_from_dict(ti, klass, level=level, location=p, **kw) for ti in yl]
# print 'ffff', conds
conds = [c for c in conds if c is not None]
if conds:
conddict[tag] = conds
# var.extend(conds)
if name:
try:
conddict = conddict[name]
except KeyError:
conddict = None
return conddict
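# For illustration, a conditionals file this loader accepts might look like
# (a minimal sketch; keys inferred from the tags and from_dict handling above):
#
#     terminations:
#       - teststr: Ar40 < 10
#         start: 20
#         ntrips: 3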
def conditional_from_dict(cd, klass, level=None, location=None, **kw):
if isinstance(klass, str):
klass = globals()[klass]
# try:
# teststr = cd['teststr']
# except KeyError:
# #for pre 2.0.5 conditionals files
# teststr = cd.get('check')
# if not teststr:
# return
# attr = cd.get('attr')
# if not attr:
# return
teststr = dictgetter(cd, ('teststr', 'comp', 'check'))
if not teststr:
return
# start = dictgetter(cd, ('start', 'start_count'), default=50)
# freq = cd.get('frequency', 1)
# win = cd.get('window', 0)
# mapper = cd.get('mapper', '')
#
# ntrips = cd.get('ntrips', 1)
#
# analysis_types = cd.get('analysis_types')
# if analysis_types:
# analysis_types = [a.lower() for a in analysis_types]
# attr = extract_attr(teststr)
# cx = klass(teststr, start_count=start, frequency=freq,
# attr=attr,
# window=win, mapper=mapper, action=action,
# ntrips=ntrips, analysis_types=analysis_types,
# **kw)
cx = klass(teststr)
cx.from_dict(teststr, cd, kw)
if level:
cx.level = level
if location:
if os.path.isfile(location):
location = os.path.relpath(location, paths.root_dir)
cx.location = location
return cx
class BaseConditional(Loggable):
attr = Str
teststr = Str
start_count = Int
level = Enum(None, SYSTEM, QUEUE, RUN)
tripped = Bool
location = Str
def from_dict(self, teststr, cd, kw):
start = dictgetter(cd, ('start', 'start_count'), default=50)
freq = cd.get('frequency', 1)
win = cd.get('window', 0)
mapper = cd.get('mapper', '')
ntrips = cd.get('ntrips', 1)
analysis_types = cd.get('analysis_types', [])
if analysis_types:
analysis_types = [a.lower().replace(' ', '_') for a in analysis_types]
else:
analysis_types = []
attr = extract_attr(teststr)
self.trait_set(start_count=start, frequency=freq,
attr=attr,
window=win, mapper=mapper,
ntrips=ntrips,
analysis_types=analysis_types,
**kw)
self._from_dict_hook(cd)
def _from_dict_hook(self, cd):
pass
def to_string(self):
raise NotImplementedError
def check(self, run, data, cnt):
"""
check conditional if cnt is greater than start count
cnt-start count is greater than 0
and cnt-start count is divisible by frequency
returns True if the check passes, i.e. write checks to trip on success. To terminate if Ar36 intensity is less than
x, use Ar36<x
:param run: ``AutomatedRun``
:param data: 2-tuple. (keys, signals) where keys==detector names, signals==measured intensities
:param cnt: int
:return: True if the check passes, i.e. write checks to trip on success.
"""
if self._should_check(run, data, cnt):
return self._check(run, data)
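# For example (a sketch): a conditional with teststr 'Ar36 > 1' trips once the
# measured Ar36 intensity exceeds 1, but only after start_count counts and
# only on counts where (cnt - start_count) % frequency == 0.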
def _check(self, run, data):
raise NotImplementedError
def _should_check(self, run, data, cnt):
return True
def __repr__(self):
return self.to_string()
class AutomatedRunConditional(BaseConditional):
frequency = Int
message = Str
# used to specify a window (in counts) of data to average, etc.
window = Int
mapper = Str
analysis_types = List
_mapper_key = ''
active = True
value = Float
ntrips = Int(1)
trips = 0
_teststr = None
_ctx = None
value_context = None
# def __init__(self, attr, teststr,
# start_count=0,
# frequency=1,
# *args, **kw):
def __init__(self, teststr,
start_count=0,
frequency=1,
*args, **kw):
self.active = True
# self.attr = attr
self.teststr = teststr
self.start_count = start_count
self.frequency = frequency
super(AutomatedRunConditional, self).__init__(*args, **kw)
def to_string(self):
s = '{} {}'.format(self.teststr, self.message)
return s
def to_dict(self):
d = self._attr_dict()
d['hash_id'] = self._hash_id(d)
return d
def _hash_id(self, d=None):
if d is None:
d = self._attr_dict()
return hash(frozenset(list(d.items())))
def _attr_dict(self):
return {'teststr': self.teststr, 'start_count': self.start_count,
'frequency': self.frequency, 'ntrips': self.ntrips,
'level': self.level,
'analysis_types': tuple(self.analysis_types), 'location': self.location}
def result_dict(self):
hash_id = self._hash_id()
return {'teststr': self._teststr, 'context': self.value_context, 'hash_id': hash_id}
def _should_check(self, run, data, cnt):
if self.analysis_types:
# check if checking should be done on this run based on analysis_type
atype = run.spec.analysis_type.lower()
if 'blank' in self.analysis_types:
if atype.startswith('blank'):
return
if atype not in self.analysis_types:
return
if self.active:
if isinstance(cnt, bool):
return cnt
else:
ocnt = cnt - self.start_count
b = ocnt > 0
c = ocnt % self.frequency == 0
cnt_flag = b and c
return cnt_flag
def _check(self, run, data, verbose=False):
"""
make a teststr and context from the run and data
evaluate the teststr with the context
"""
teststr, ctx = self._make_context(run, data)
self._teststr, self._ctx = teststr, ctx
self.value_context = vc = pprint.pformat(ctx, width=1)
self.debug('testing {}'.format(teststr))
if verbose:
self.debug('attribute context {}'.format(pprint.pformat(self._attr_dict(), width=1)))
msg = 'evaluate ot="{}" t="{}", ctx="{}"'.format(self.teststr, teststr, vc)
self.debug(msg)
if teststr and ctx:
if eval(teststr, ctx):
self.trips += 1
self.debug('condition {} is true trips={}/{}'.format(teststr, self.trips,
self.ntrips))
if self.trips >= self.ntrips:
self.tripped = True
self.message = 'condition {} is True'.format(teststr)
self.trips = 0
return True
else:
self.trips = 0
def _make_context(self, obj, data):
teststr = self.teststr
ctx = {}
tt = []
for ti, oper in tokenize(teststr):
ts, attr, func = get_teststr_attr_func(ti)
attr = attr.replace('(', '_').replace(')', '_')
ts = ts.replace('(', '_').replace(')', '_')
v = func(obj, data, self.window)
if v is not None:
vv = std_dev(v) if STD_REGEX.match(teststr) else nominal_value(v)
vv = self._map_value(vv)
ctx[attr] = vv
ts = self._interpolate_teststr(ts, obj, data)
tt.append(ts)
if oper:
tt.append(oper)
return ' '.join(tt), ctx
def _map_value(self, vv):
if self.mapper:
m = MAPPER_KEY_REGEX.search(self.mapper)
if m:
key = m.group(0)
vv = eval(self.mapper, {key: vv})
return vv
def _interpolate_teststr(self, ts, obj, data):
nts = ts
for temp in INTERPOLATE_REGEX.finditer(ts):
temp = temp.group(0)
new = obj.get_interpolated_value(temp)
nts = nts.replace(temp, str(new))
return nts
class TruncationConditional(AutomatedRunConditional):
"""
stops the current measurement and continues to the next step in the pyscript.
If more measure calls remain, use abbreviated_count_ratio to reduce
the number of counts. For example, if abbreviated_count_ratio = 0.5 and
the original baseline counts = 100, only 50 counts will be made for a truncated
run.
"""
abbreviated_count_ratio = 1.0
class TerminationConditional(AutomatedRunConditional):
"""
Stop the current analysis immediately. Don't save to database.
Continue to next run in experiment queue
"""
class CancelationConditional(AutomatedRunConditional):
"""
Stop the current analysis immediately then stop the experiment.
"""
class ActionConditional(AutomatedRunConditional):
"""
Executes a specified action. The action string is executed as a pyscript snippet. Actions
therefore may be any valid measurement pyscript code. For example::
# call a gosub
gosub("someGoSub")
# open a valve
open("V")
"""
action = Either(Str, Callable)
resume = Bool # resume==True the script continues execution else break out of measure_iteration
def _from_dict_hook(self, cd):
for tag in ('action', 'resume'):
if tag in cd:
setattr(self, tag, cd[tag])
def perform(self, script):
"""
perform the specified action.
use ``MeasurementPyScript.execute_snippet`` to perform desired action
:param script: MeasurementPyScript
"""
action = self.action
if isinstance(action, str):
try:
script.execute_snippet(action)
except BaseException:
self.warning('Invalid action: "{}"'.format(action))
elif hasattr(action, '__call__'):
action()
MODIFICATION_ACTIONS = ('Skip Next Run', 'Skip N Runs', 'Skip Aliquot', 'Skip to Last in Aliquot', 'Set Extract')
class ExtractionStr(BaseStr):
def validate(self, obj, name, value):
if value == '':
return value
for r in (EXTRACTION_STR_ABS_REGEX, EXTRACTION_STR_PERCENT_REGEX):
if r.match(value):
return value
else:
self.error(obj, name, value)
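# For illustration (assumed forms, per the two regexes above): ExtractionStr
# accepts comma-separated step lists such as '10, 20, 30' (absolute
# increments) or '10%, 20%' (percent increments).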
def get_extraction_steps(s):
use_percent = bool(EXTRACTION_STR_PERCENT_REGEX.match(s))
def gen():
for si in s.split(','):
si = si.strip()
if si.endswith('%'):
si = si[:-1]
yield float(si)
return gen(), use_percent
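# Usage sketch (assumed input; use_percent depends on the percent regex):
#
#     steps, use_percent = get_extraction_steps('10%, 20%')
#     next(steps)   # -> 10.0, with use_percent True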
class QueueModificationConditional(AutomatedRunConditional):
use_truncation = Bool
use_termination = Bool
nskip = Int
action = Enum(MODIFICATION_ACTIONS)
extraction_str = ExtractionStr
def do_modifications(self, queue, current_run):
runs = queue.cleaned_automated_runs
func = getattr(self, self.action.lower().replace(' ', '_'))
func(runs, current_run)
queue.refresh_table_needed = True
def _from_dict_hook(self, cd):
for tag in ('action', 'nskip', 'use_truncation', 'use_termination'):
if tag in cd:
setattr(self, tag, cd[tag])
def _skip_n_runs(self, runs, current_run, n=None):
if n is None:
n = self.nskip
for i in range(n):
r = runs[i]
r.skip = True
def _skip_next_run(self, runs, current_run):
self._skip_n_runs(runs, current_run, 1)
def _skip_aliquot(self, runs, current_run):
identifier = current_run.spec.identifier
aliquot = current_run.spec.aliquot
for r in runs:
if r.is_special():
continue
if r.identifier == identifier and r.aliquot == aliquot:
r.skip = True
def _skip_to_last_in_aliquot(self, runs, current_run):
identifier = current_run.spec.identifier
for i, r in enumerate(runs):
if r.is_special():
continue
try:
nrun = runs[i + 1]
if nrun.identifier != identifier:
break
else:
r.skip = True
except IndexError:
pass
def _set_extract(self, runs, current_run):
es = self.extraction_str
identifier = current_run.spec.identifier
aliquot = current_run.spec.aliquot
steps_gen, use_percent = get_extraction_steps(es)
for r in runs:
if r.is_special():
continue
if r.identifier != identifier or r.aliquot != aliquot:
break
try:
nstep = next(steps_gen)
if use_percent:
r.extract_value *= (1 + nstep / 100.)
else:
r.extract_value += nstep
except StopIteration:
break
# ============= EOF =============================================
# attr = extract_attr(token)
# tkey = attr
# def default_wrapper(teststr, found):
# teststr = '{}{}'.format(tkey, remove_attr(teststr))
# return teststr, tkey
#
# def between_wrapper(teststr, func, between):
# v = None
# args = ARGS_REGEX.search(between).group(0)[1:-1].split(',')
# key = args[0]
# if '.' in key:
# key = key.split('.')[0].strip()
# v = 0
# # v = self.get_modified_value(arun, key, key)
#
# v1, v2 = args[1:]
# nc = '{}<={}<={}'.format(v1, key, v2)
#
# teststr = teststr.replace(between, nc)
# if between.startswith('not '):
# teststr = 'not {}'.format(teststr)
#
# if v is None:
# v = func()
# return v, teststr, key
#
# def ratio_wrapper(teststr, func, ratio):
# v = obj.get_value(ratio)
# key = 'ratio{}'.format(ratio.replace('/', ''))
# teststr = '{}{}'.format(key, remove_attr(teststr))
# return v, teststr, key
# for aa in ((CP_REGEX, lambda obj, data, window: obj.arar_age.get_current_intensity(attr)),
# (BASELINECOR_REGEX, lambda obj, data, window: obj.garar_age.et_baseline_corrected_value(attr)),
# (BASELINE_REGEX, lambda obj, data, window: obj.arar_age.get_baseline_value(attr)),
# (ACTIVE_REGEX, lambda obj, data, window: not attr in data[0]),
# (AVG_REGEX, lambda obj, data, window: obj.arar_age.get_values(attr, window or -1).mean()),
# (MAX_REGEX, lambda obj, data, window: obj.arar_age.get_values(attr, window or -1).max()),
# (MIN_REGEX, lambda obj, data, window: obj.arar_age.get_values(attr, window or -1).min()),
# (SLOPE_REGEX, lambda obj, data, window: obj.arar_age.get_slope(attr, window or -1)),
# (DEFLECTION_REGEX, lambda obj, data, window: obj.get_deflection(attr, current=True)),
# (RATIO_REGEX, None, ratio_wrapper),
# (BETWEEN_REGEX, lambda obj, data, window: obj.arar_age.get_value(attr), between_wrapper)):
#
# if len(aa) == 2:
# wrapper = default_wrapper
# reg, func = aa
# else:
# reg, func, wrapper = aa
#
# found = reg.match(attr)
# if found:
# args = wrapper(attr, found.group(0))
# if args:
# teststr, tkey = args
# # teststr, tkey = args
# break
# else:
# teststr='asdf'
# def _get_simple_key(self, teststr):
# m = PARENTHESES_REGEX.findall(teststr)
# if m:
# key = m[0][1:-1]
# else:
# m = KEY_REGEX.findall(teststr)
# if m:
# k = m[0]
# if k in ('not',):
# k = m[1]
# key = k
# else:
# key = self.attr
# return key
# def _check(self, arun, data):
# obj = arun.arar_age
# # attr = self.attr
#
# cc = self.teststr
# invert = False
# if cc.startswith('not '):
# cc = cc[4:]
# invert = True
#
# tkey = self._get_simple_key(cc)
#
# def default_wrapper(teststr, func, found):
# teststr = '{}{}'.format(tkey, remove_attr(teststr))
# return func(), teststr, tkey
#
# def between_wrapper(teststr, func, between):
# v = None
# args = ARGS_REGEX.search(between).group(0)[1:-1].split(',')
# key = args[0]
# if '.' in key:
# key = key.split('.')[0].strip()
# v = self.get_modified_value(arun, key, key)
#
# v1, v2 = args[1:]
# nc = '{}<={}<={}'.format(v1, key, v2)
#
# teststr = teststr.replace(between, nc)
# if between.startswith('not '):
# teststr = 'not {}'.format(teststr)
#
# if v is None:
# v = func()
# return v, teststr, key
#
# def ratio_wrapper(teststr, func, ratio):
# v = obj.get_value(ratio)
# key = 'ratio{}'.format(ratio.replace('/', ''))
# teststr = '{}{}'.format(key, remove_attr(teststr))
# return v, teststr, key
#
# for aa in ((CP_REGEX, lambda: obj.get_current_intensity(attr)),
# (BASELINECOR_REGEX, lambda: obj.get_baseline_corrected_value(attr)),
# (BASELINE_REGEX, lambda: obj.get_baseline_value(attr)),
# (ACTIVE_REGEX, lambda: not attr in data[0]),
# (AVG_REGEX, lambda: obj.get_values(attr, self.window or -1).mean()),
# (MAX_REGEX, lambda: obj.get_values(attr, self.window or -1).max()),
# (MIN_REGEX, lambda: obj.get_values(attr, self.window or -1).min()),
# (SLOPE_REGEX, lambda: obj.get_slope(attr, self.window or -1)),
# (DEFLECTION_REGEX, lambda: arun.get_deflection(attr, current=True)),
# (RATIO_REGEX, None, ratio_wrapper),
# (BETWEEN_REGEX, lambda: obj.get_value(attr), between_wrapper)):
#
# if len(aa) == 2:
# wrapper = default_wrapper
# reg, func = aa
# else:
# reg, func, wrapper = aa
#
# found = reg.match(cc)
# if found:
# args = wrapper(cc, func, found.group(0))
# if args:
# v, teststr, tkey = args
# break
# else:
# teststr = cc
# try:
# if self.window:
# vs = obj.get_values(attr, self.window)
# if not vs:
# self.warning('Deactivating check. check attr invalid for use with window')
# self.active = False
# return
# v = ufloat(vs.mean(), vs.std())
# else:
# v = obj.get_value(attr)
# except Exception, e:
# self.warning('Deactivating check. Check Exception "{}."'.format(e))
# self.active = False
#
# if tkey == 'age':
# atype = arun.spec.analysis_type
# if not atype in AGE_TESTABLE:
# msg = 'age conditional for {} not allowed'.format(atype)
# self.unique_warning(msg)
# return
#
# if v is not None:
# vv = std_dev(v) if STD_REGEX.match(teststr) else nominal_value(v)
# vv = self._map_value(vv)
# self.value = vv
# if invert:
# teststr = 'not {}'.format(teststr)
#
# self.debug('testing {} (eval={}) key={} attr={} value={} mapped_value={}'.format(self.teststr, teststr,
# tkey, self.attr, v,
# vv))
# if eval(teststr, {tkey: vv}):
# self.debug('condition {} is true'.format(teststr))
# self.message = 'attr={}, value= {} {} is True'.format(self.attr, vv, self.teststr)
# return True
| apache-2.0 |
elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 1B/instances/6_workflow_full_10files_primary_1sh_1rs_with_annot_with_proj_3s/init_0/DataStoreInit.py | 1 | 2757 | #!/usr/bin/env python
from pymongo import MongoClient
import pymongo
HOST = "wfSciwoncGW:enw1989@172.31.28.140:27001/?authSource=admin"
c = MongoClient('mongodb://'+HOST)
dbname = "googlew"
task = "task_events"
ginfo = "general_info"
statscpumemory = "stats_cpumemory"
maxmincpumemory = "maxmin_cpumemory"
mediancpu = "median_cpu"
medianmemory = "median_memory"
tinfo = "task_events_info"
ratio = "ratio"
avgratio = "average_ratioevent"
analysis = "analysis_ratio"
db = c[dbname]
task_col = db[task]
c[dbname].drop_collection(ginfo)
c[dbname].create_collection(ginfo)
c[dbname].drop_collection(statscpumemory)
c[dbname].create_collection(statscpumemory)
c[dbname].drop_collection(maxmincpumemory)
c[dbname].create_collection(maxmincpumemory)
c[dbname].drop_collection(mediancpu)
c[dbname].create_collection(mediancpu)
c[dbname].drop_collection(medianmemory)
c[dbname].create_collection(medianmemory)
c[dbname].drop_collection(tinfo)
c[dbname].create_collection(tinfo)
c[dbname].drop_collection(ratio)
c[dbname].create_collection(ratio)
c[dbname].drop_collection(avgratio)
c[dbname].create_collection(avgratio)
c[dbname].drop_collection(analysis)
c[dbname].create_collection(analysis)
db = c[dbname]
task_col = db[task]
ginfo_col = db[ginfo]
statscpumemory_col = db[statscpumemory]
maxmincpumemory_col = db[maxmincpumemory]
mediancpu_col = db[mediancpu]
medianmemory_col = db[medianmemory]
tinfo_col = db[tinfo]
ratio_col = db[ratio]
avgratio_col = db[avgratio]
analysis_col = db[analysis]
task_col.create_index([("CPU request", pymongo.ASCENDING)])
task_col.create_index([("memory request", pymongo.ASCENDING)])
task_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
task_col.create_index([("event type", pymongo.ASCENDING)])
ratio_col.create_index([("event type", pymongo.ASCENDING)])
ginfo_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
statscpumemory_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
maxmincpumemory_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
mediancpu_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
medianmemory_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
tinfo_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
ratio_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
avgratio_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
analysis_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
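# Note (a sketch of intent): every output collection above is dropped,
# recreated, and indexed on the composite (_id.filepath, _id.numline) key so
# downstream workflow steps can fetch results by file and line number.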
| gpl-3.0 |
malishevg/edugraph | cms/djangoapps/contentstore/features/grading.py | 10 | 7108 | # pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from common import *
from terrain.steps import reload_the_page
from selenium.common.exceptions import (
InvalidElementStateException, WebDriverException)
from nose.tools import assert_in, assert_not_in, assert_equal, assert_not_equal # pylint: disable=E0611
@step(u'I am viewing the grading settings')
def view_grading_settings(step):
world.click_course_settings()
link_css = 'li.nav-course-settings-grading a'
world.css_click(link_css)
@step(u'I add "([^"]*)" new grade')
def add_grade(step, many):
grade_css = '.new-grade-button'
for i in range(int(many)):
world.css_click(grade_css)
@step(u'I delete a grade')
def delete_grade(step):
#grade_css = 'li.grade-specific-bar > a.remove-button'
#range_css = '.grade-specific-bar'
#world.css_find(range_css)[1].mouseover()
#world.css_click(grade_css)
world.browser.execute_script('document.getElementsByClassName("remove-button")[0].click()')
@step(u'I see I now have "([^"]*)" grades$')
def view_grade_slider(step, how_many):
grade_slider_css = '.grade-specific-bar'
all_grades = world.css_find(grade_slider_css)
assert_equal(len(all_grades), int(how_many))
@step(u'I move a grading section')
def move_grade_slider(step):
moveable_css = '.ui-resizable-e'
f = world.css_find(moveable_css).first
f.action_chains.drag_and_drop_by_offset(f._element, 100, 0).perform()
@step(u'I see that the grade range has changed')
def confirm_change(step):
range_css = '.range'
all_ranges = world.css_find(range_css)
for i in range(len(all_ranges)):
assert_not_equal(world.css_html(range_css, index=i), '0-50')
@step(u'I change assignment type "([^"]*)" to "([^"]*)"$')
def change_assignment_name(step, old_name, new_name):
name_id = '#course-grading-assignment-name'
index = get_type_index(old_name)
f = world.css_find(name_id)[index]
assert_not_equal(index, -1)
for count in range(len(old_name)):
f._element.send_keys(Keys.END, Keys.BACK_SPACE)
f._element.send_keys(new_name)
@step(u'I go back to the main course page')
def main_course_page(step):
course_name = world.scenario_dict['COURSE'].display_name.replace(' ', '_')
main_page_link = '/course/{org}.{number}.{name}/branch/draft/block/{name}'.format(
org=world.scenario_dict['COURSE'].org,
number=world.scenario_dict['COURSE'].number,
name=course_name
)
world.visit(main_page_link)
assert_in('Course Outline', world.css_text('h1.page-header'))
@step(u'I do( not)? see the assignment name "([^"]*)"$')
def see_assignment_name(step, do_not, name):
assignment_menu_css = 'ul.menu > li > a'
    # First assert that it is there; it may take a bit to redraw
assert_true(
world.css_find(assignment_menu_css),
msg="Could not find assignment menu"
)
assignment_menu = world.css_find(assignment_menu_css)
allnames = [item.html for item in assignment_menu]
if do_not:
assert_not_in(name, allnames)
else:
assert_in(name, allnames)
@step(u'I delete the assignment type "([^"]*)"$')
def delete_assignment_type(step, to_delete):
delete_css = '.remove-grading-data'
world.css_click(delete_css, index=get_type_index(to_delete))
@step(u'I add a new assignment type "([^"]*)"$')
def add_assignment_type(step, new_name):
add_button_css = '.add-grading-data'
world.css_click(add_button_css)
name_id = '#course-grading-assignment-name'
new_assignment = world.css_find(name_id)[-1]
new_assignment._element.send_keys(new_name)
@step(u'I set the assignment weight to "([^"]*)"$')
def set_weight(step, weight):
weight_id = '#course-grading-assignment-gradeweight'
weight_field = world.css_find(weight_id)[-1]
old_weight = world.css_value(weight_id, -1)
for count in range(len(old_weight)):
weight_field._element.send_keys(Keys.END, Keys.BACK_SPACE)
weight_field._element.send_keys(weight)
@step(u'the assignment weight is displayed as "([^"]*)"$')
def verify_weight(step, weight):
weight_id = '#course-grading-assignment-gradeweight'
assert_equal(world.css_value(weight_id, -1), weight)
@step(u'I have populated the course')
def populate_course(step):
step.given('I have added a new section')
step.given('I have added a new subsection')
@step(u'I do not see the changes persisted on refresh$')
def changes_not_persisted(step):
reload_the_page(step)
name_id = '#course-grading-assignment-name'
assert_equal(world.css_value(name_id), 'Homework')
@step(u'I see the assignment type "(.*)"$')
def i_see_the_assignment_type(_step, name):
assignment_css = '#course-grading-assignment-name'
assignments = world.css_find(assignment_css)
types = [ele['value'] for ele in assignments]
assert_in(name, types)
@step(u'I change the highest grade range to "(.*)"$')
def change_grade_range(_step, range_name):
range_css = 'span.letter-grade'
grade = world.css_find(range_css).first
grade.value = range_name
@step(u'I see the highest grade range is "(.*)"$')
def i_see_highest_grade_range(_step, range_name):
range_css = 'span.letter-grade'
grade = world.css_find(range_css).first
assert_equal(grade.value, range_name)
@step(u'I cannot edit the "Fail" grade range$')
def cannot_edit_fail(_step):
range_css = 'span.letter-grade'
ranges = world.css_find(range_css)
assert_equal(len(ranges), 2)
assert_not_equal(ranges.last.value, 'Failure')
# try to change the grade range -- this should throw an exception
try:
ranges.last.value = 'Failure'
except (InvalidElementStateException):
pass # We should get this exception on failing to edit the element
# check to be sure that nothing has changed
ranges = world.css_find(range_css)
assert_equal(len(ranges), 2)
assert_not_equal(ranges.last.value, 'Failure')
@step(u'I change the grace period to "(.*)"$')
def i_change_grace_period(_step, grace_period):
grace_period_css = '#course-grading-graceperiod'
ele = world.css_find(grace_period_css).first
# Sometimes it takes a moment for the JavaScript
# to populate the field. If we don't wait for
# this to happen, then we can end up with
# an invalid value (e.g. "00:0048:00")
# which prevents us from saving.
assert_true(world.css_has_value(grace_period_css, "00:00"))
# Set the new grace period
ele.value = grace_period
@step(u'I see the grace period is "(.*)"$')
def the_grace_period_is(_step, grace_period):
grace_period_css = '#course-grading-graceperiod'
# The default value is 00:00
# so we need to wait for it to change
world.wait_for(
lambda _: world.css_has_value(grace_period_css, grace_period)
)
def get_type_index(name):
name_id = '#course-grading-assignment-name'
all_types = world.css_find(name_id)
for index in range(len(all_types)):
if world.css_value(name_id, index=index) == name:
return index
return -1
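# A minimal sketch (assuming the feature-file wording matches the step regexes
# above) of a Gherkin scenario these steps would drive:
#   Scenario: Add a grade
#       Given I am viewing the grading settings
#       When I add "1" new grade
#       Then I see I now have "6" grades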
| agpl-3.0 |
switowski/invenio | invenio/utils/mimetype.py | 17 | 8266 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio mimetype helper functions.
Usage example:
TODO
"""
import re
from invenio.base.globals import cfg
from mimetypes import MimeTypes
from six import iteritems
from werkzeug import cached_property, LocalProxy
from thread import get_ident
try:
import magic
if hasattr(magic, "open"):
CFG_HAS_MAGIC = 1
elif hasattr(magic, "Magic"):
CFG_HAS_MAGIC = 2
except ImportError:
CFG_HAS_MAGIC = 0
_magic_cookies = {}
if CFG_HAS_MAGIC == 1:
def _get_magic_cookies():
"""
@return: a tuple of magic object.
@rtype: (MAGIC_NONE, MAGIC_COMPRESS, MAGIC_MIME, MAGIC_COMPRESS + MAGIC_MIME)
@note: ... not real magic. Just see: man file(1)
"""
thread_id = get_ident()
if thread_id not in _magic_cookies:
_magic_cookies[thread_id] = {
magic.MAGIC_NONE: magic.open(magic.MAGIC_NONE),
magic.MAGIC_COMPRESS: magic.open(magic.MAGIC_COMPRESS),
magic.MAGIC_MIME: magic.open(magic.MAGIC_MIME),
magic.MAGIC_COMPRESS + magic.MAGIC_MIME: magic.open(magic.MAGIC_COMPRESS + magic.MAGIC_MIME),
magic.MAGIC_MIME_TYPE: magic.open(magic.MAGIC_MIME_TYPE),
}
for key in _magic_cookies[thread_id].keys():
_magic_cookies[thread_id][key].load()
return _magic_cookies[thread_id]
elif CFG_HAS_MAGIC == 2:
def _magic_wrapper(local_path, mime=True, mime_encoding=False):
thread_id = get_ident()
if (thread_id, mime, mime_encoding) not in _magic_cookies:
magic_object = _magic_cookies[thread_id, mime, mime_encoding] = magic.Magic(mime=mime, mime_encoding=mime_encoding)
else:
magic_object = _magic_cookies[thread_id, mime, mime_encoding]
return magic_object.from_file(local_path) # pylint: disable=E1103
class LazyMimeCache(object):
@cached_property
def mimes(self):
"""
Returns extended MimeTypes.
"""
_mimes = MimeTypes(strict=False)
_mimes.suffix_map.update({'.tbz2' : '.tar.bz2'})
_mimes.encodings_map.update({'.bz2' : 'bzip2'})
if cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES']:
for key, value in iteritems(cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_MIMETYPES']):
_mimes.add_type(key, value)
del key, value
return _mimes
@cached_property
def extensions(self):
"""
Generate the regular expression to match all the known extensions.
@return: the regular expression.
@rtype: regular expression object
"""
_tmp_extensions = self.mimes.encodings_map.keys() + \
self.mimes.suffix_map.keys() + \
self.mimes.types_map[1].keys() + \
cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS']
extensions = []
for ext in _tmp_extensions:
if ext.startswith('.'):
extensions.append(ext)
else:
extensions.append('.' + ext)
extensions.sort()
extensions.reverse()
extensions = set([ext.lower() for ext in extensions])
extensions = '\\' + '$|\\'.join(extensions) + '$'
extensions = extensions.replace('+', '\\+')
return re.compile(extensions, re.I)
#: Lazy mime and extensitons cache.
_mime_cache = LazyMimeCache()
#: MimeTypes instance.
_mimes = LocalProxy(lambda: _mime_cache.mimes)
#: Regular expression to recognized extensions.
_extensions = LocalProxy(lambda: _mime_cache.extensions)
# Use only the functions below in your code:
def file_strip_ext(afile, skip_version=False, only_known_extensions=False, allow_subformat=True):
"""
Strip in the best way the extension from a filename.
>>> file_strip_ext("foo.tar.gz")
'foo'
>>> file_strip_ext("foo.buz.gz")
'foo.buz'
>>> file_strip_ext("foo.buz")
'foo'
>>> file_strip_ext("foo.buz", only_known_extensions=True)
'foo.buz'
>>> file_strip_ext("foo.buz;1", skip_version=False,
... only_known_extensions=True)
'foo.buz;1'
>>> file_strip_ext("foo.gif;icon")
'foo'
    >>> file_strip_ext("foo.gif;icon", only_known_extensions=True,
... allow_subformat=False)
'foo.gif;icon'
@param afile: the path/name of a file.
@type afile: string
@param skip_version: whether to skip a trailing ";version".
@type skip_version: bool
@param only_known_extensions: whether to strip out only known extensions or
to consider as extension anything that follows a dot.
@type only_known_extensions: bool
@param allow_subformat: whether to consider also subformats as part of
the extension.
@type allow_subformat: bool
@return: the name/path without the extension (and version).
@rtype: string
"""
import os
afile = afile.split(';')
if len(afile)>1 and allow_subformat and not afile[-1].isdigit():
afile = afile[0:-1]
if len(afile)>1 and skip_version and afile[-1].isdigit():
afile = afile[0:-1]
afile = ';'.join(afile)
nextfile = _extensions.sub('', afile)
if nextfile == afile and not only_known_extensions:
nextfile = os.path.splitext(afile)[0]
while nextfile != afile:
afile = nextfile
nextfile = _extensions.sub('', afile)
return nextfile
def guess_mimetype_and_encoding(afile):
"""
Tries to guess mimetype and encoding of a file.
@param afile: the path/name of a file
    @type afile: string
@return: the mimetype and encoding
@rtype: tuple
"""
return _mimes.guess_type(afile)
def guess_extension(amimetype, normalize=False):
"""
Tries to guess extension for a mimetype.
@param amimetype: name of a mimetype
    @type amimetype: string
@return: the extension
@rtype: string
"""
ext = _mimes.guess_extension(amimetype)
if ext and normalize:
        ## Normalize some common magic mis-interpretations
ext = {'.asc': '.txt', '.obj': '.bin'}.get(ext, ext)
from invenio.legacy.bibdocfile.api_normalizer import normalize_format
return normalize_format(ext)
return ext
def get_magic_guesses(fullpath):
"""
Return all the possible guesses from the magic library about
the content of the file.
@param fullpath: location of the file
@type fullpath: string
@return: guesses about content of the file
@rtype: tuple
"""
if CFG_HAS_MAGIC == 1:
magic_cookies = _get_magic_cookies()
magic_result = []
for key in magic_cookies.keys():
magic_result.append(magic_cookies[key].file(fullpath))
return tuple(magic_result)
elif CFG_HAS_MAGIC == 2:
magic_result = []
for key in ({'mime': False, 'mime_encoding': False},
{'mime': True, 'mime_encoding': False},
{'mime': False, 'mime_encoding': True}):
magic_result.append(_magic_wrapper(fullpath, **key))
return tuple(magic_result)
def guess_extension_from_path(local_path):
try:
if CFG_HAS_MAGIC == 1:
magic_cookie = _get_magic_cookies()[magic.MAGIC_MIME_TYPE]
mimetype = magic_cookie.file(local_path)
elif CFG_HAS_MAGIC == 2:
mimetype = _magic_wrapper(local_path, mime=True, mime_encoding=False)
if CFG_HAS_MAGIC:
return guess_extension(mimetype, normalize=True)
except Exception:
pass
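# A minimal usage sketch (hedged: these helpers resolve lazily through the
# Invenio application config, which must be initialised; the values shown
# follow the standard library's mimetypes tables):
#   >>> guess_mimetype_and_encoding('paper.tar.gz')
#   ('application/x-tar', 'gzip')
#   >>> guess_extension('application/pdf')
#   '.pdf'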
| gpl-2.0 |
abstract-open-solutions/OCB | addons/l10n_fr_hr_payroll/report/__init__.py | 424 | 1091 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#    $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import fiche_paye
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
smarinac/root | interpreter/llvm/src/tools/clang/tools/scan-view/Reporter.py | 65 | 8135 | """Methods for reporting bugs."""
import subprocess, sys, os
__all__ = ['ReportFailure', 'BugReport', 'getReporters']
#
class ReportFailure(Exception):
"""Generic exception for failures in bug reporting."""
def __init__(self, value):
self.value = value
# Collect information about a bug.
class BugReport:
def __init__(self, title, description, files):
self.title = title
self.description = description
self.files = files
# Reporter interfaces.
import os
import email, mimetypes, smtplib
from email import encoders
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
#===------------------------------------------------------------------------===#
# ReporterParameter
#===------------------------------------------------------------------------===#
class ReporterParameter:
def __init__(self, n):
self.name = n
def getName(self):
return self.name
def getValue(self,r,bugtype,getConfigOption):
return getConfigOption(r.getName(),self.getName())
def saveConfigValue(self):
return True
class TextParameter (ReporterParameter):
def getHTML(self,r,bugtype,getConfigOption):
return """\
<tr>
<td class="form_clabel">%s:</td>
<td class="form_value"><input type="text" name="%s_%s" value="%s"></td>
</tr>"""%(self.getName(),r.getName(),self.getName(),self.getValue(r,bugtype,getConfigOption))
class SelectionParameter (ReporterParameter):
def __init__(self, n, values):
ReporterParameter.__init__(self,n)
self.values = values
def getHTML(self,r,bugtype,getConfigOption):
default = self.getValue(r,bugtype,getConfigOption)
return """\
<tr>
<td class="form_clabel">%s:</td><td class="form_value"><select name="%s_%s">
%s
</select></td>"""%(self.getName(),r.getName(),self.getName(),'\n'.join(["""\
<option value="%s"%s>%s</option>"""%(o[0],
o[0] == default and ' selected="selected"' or '',
o[1]) for o in self.values]))
#===------------------------------------------------------------------------===#
# Reporters
#===------------------------------------------------------------------------===#
class EmailReporter:
def getName(self):
return 'Email'
def getParameters(self):
return map(lambda x:TextParameter(x),['To', 'From', 'SMTP Server', 'SMTP Port'])
# Lifted from python email module examples.
def attachFile(self, outer, path):
# Guess the content type based on the file's extension. Encoding
# will be ignored, although we should check for simple things like
# gzip'd or compressed files.
ctype, encoding = mimetypes.guess_type(path)
if ctype is None or encoding is not None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
fp = open(path)
# Note: we should handle calculating the charset
msg = MIMEText(fp.read(), _subtype=subtype)
fp.close()
else:
fp = open(path, 'rb')
msg = MIMEBase(maintype, subtype)
msg.set_payload(fp.read())
fp.close()
# Encode the payload using Base64
encoders.encode_base64(msg)
# Set the filename parameter
msg.add_header('Content-Disposition', 'attachment', filename=os.path.basename(path))
outer.attach(msg)
def fileReport(self, report, parameters):
mainMsg = """\
BUG REPORT
---
Title: %s
Description: %s
"""%(report.title, report.description)
if not parameters.get('To'):
raise ReportFailure('No "To" address specified.')
if not parameters.get('From'):
raise ReportFailure('No "From" address specified.')
msg = MIMEMultipart()
msg['Subject'] = 'BUG REPORT: %s'%(report.title)
# FIXME: Get config parameters
msg['To'] = parameters.get('To')
msg['From'] = parameters.get('From')
msg.preamble = mainMsg
    msg.attach(MIMEText(mainMsg, _subtype='plain'))
for file in report.files:
self.attachFile(msg, file)
try:
s = smtplib.SMTP(host=parameters.get('SMTP Server'),
port=parameters.get('SMTP Port'))
s.sendmail(msg['From'], msg['To'], msg.as_string())
s.close()
except:
raise ReportFailure('Unable to send message via SMTP.')
return "Message sent!"
class BugzillaReporter:
def getName(self):
return 'Bugzilla'
def getParameters(self):
return map(lambda x:TextParameter(x),['URL','Product'])
def fileReport(self, report, parameters):
raise NotImplementedError
class RadarClassificationParameter(SelectionParameter):
def __init__(self):
SelectionParameter.__init__(self,"Classification",
[['1', 'Security'], ['2', 'Crash/Hang/Data Loss'],
['3', 'Performance'], ['4', 'UI/Usability'],
['6', 'Serious Bug'], ['7', 'Other']])
def saveConfigValue(self):
return False
def getValue(self,r,bugtype,getConfigOption):
if bugtype.find("leak") != -1:
return '3'
elif bugtype.find("dereference") != -1:
return '2'
elif bugtype.find("missing ivar release") != -1:
return '3'
else:
return '7'
class RadarReporter:
@staticmethod
def isAvailable():
# FIXME: Find this .scpt better
path = os.path.join(os.path.dirname(__file__),'Resources/GetRadarVersion.scpt')
try:
p = subprocess.Popen(['osascript',path],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
return False
data,err = p.communicate()
res = p.wait()
# FIXME: Check version? Check for no errors?
return res == 0
def getName(self):
return 'Radar'
def getParameters(self):
return [ TextParameter('Component'), TextParameter('Component Version'),
RadarClassificationParameter() ]
def fileReport(self, report, parameters):
component = parameters.get('Component', '')
componentVersion = parameters.get('Component Version', '')
classification = parameters.get('Classification', '')
personID = ""
diagnosis = ""
config = ""
if not component.strip():
component = 'Bugs found by clang Analyzer'
if not componentVersion.strip():
componentVersion = 'X'
script = os.path.join(os.path.dirname(__file__),'Resources/FileRadar.scpt')
args = ['osascript', script, component, componentVersion, classification, personID, report.title,
report.description, diagnosis, config] + map(os.path.abspath, report.files)
# print >>sys.stderr, args
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except:
raise ReportFailure("Unable to file radar (AppleScript failure).")
data, err = p.communicate()
res = p.wait()
if res:
raise ReportFailure("Unable to file radar (AppleScript failure).")
try:
values = eval(data)
except:
raise ReportFailure("Unable to process radar results.")
# We expect (int: bugID, str: message)
if len(values) != 2 or not isinstance(values[0], int):
raise ReportFailure("Unable to process radar results.")
bugID,message = values
bugID = int(bugID)
if not bugID:
raise ReportFailure(message)
return "Filed: <a href=\"rdar://%d/\">%d</a>"%(bugID,bugID)
###
def getReporters():
reporters = []
if RadarReporter.isAvailable():
reporters.append(RadarReporter())
reporters.append(EmailReporter())
return reporters
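# A minimal usage sketch (values are placeholders; an SMTP server must be
# reachable and the report file must exist for fileReport to succeed):
#   report = BugReport(title='Null dereference in foo()',
#                      description='Found by the clang static analyzer.',
#                      files=['/tmp/report-000001.html'])
#   params = {'To': 'dev@example.com', 'From': 'me@example.com',
#             'SMTP Server': 'localhost', 'SMTP Port': 25}
#   print(EmailReporter().fileReport(report, params))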
| lgpl-2.1 |
lanbing510/GTDWeb | django/core/files/base.py | 487 | 5717 | from __future__ import unicode_literals
import os
from io import BytesIO, StringIO, UnsupportedOperation
from django.core.files.utils import FileProxyMixin
from django.utils import six
from django.utils.encoding import (
force_bytes, force_str, python_2_unicode_compatible, smart_text,
)
@python_2_unicode_compatible
class File(FileProxyMixin):
DEFAULT_CHUNK_SIZE = 64 * 2 ** 10
def __init__(self, file, name=None):
self.file = file
if name is None:
name = getattr(file, 'name', None)
self.name = name
if hasattr(file, 'mode'):
self.mode = file.mode
def __str__(self):
return smart_text(self.name or '')
def __repr__(self):
return force_str("<%s: %s>" % (self.__class__.__name__, self or "None"))
def __bool__(self):
return bool(self.name)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __len__(self):
return self.size
def _get_size_from_underlying_file(self):
if hasattr(self.file, 'size'):
return self.file.size
if hasattr(self.file, 'name'):
try:
return os.path.getsize(self.file.name)
except (OSError, TypeError):
pass
if hasattr(self.file, 'tell') and hasattr(self.file, 'seek'):
pos = self.file.tell()
self.file.seek(0, os.SEEK_END)
size = self.file.tell()
self.file.seek(pos)
return size
raise AttributeError("Unable to determine the file's size.")
def _get_size(self):
if hasattr(self, '_size'):
return self._size
self._size = self._get_size_from_underlying_file()
return self._size
def _set_size(self, size):
self._size = size
size = property(_get_size, _set_size)
def _get_closed(self):
return not self.file or self.file.closed
closed = property(_get_closed)
def chunks(self, chunk_size=None):
"""
Read the file and yield chunks of ``chunk_size`` bytes (defaults to
``UploadedFile.DEFAULT_CHUNK_SIZE``).
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
try:
self.seek(0)
except (AttributeError, UnsupportedOperation):
pass
while True:
data = self.read(chunk_size)
if not data:
break
yield data
def multiple_chunks(self, chunk_size=None):
"""
Returns ``True`` if you can expect multiple chunks.
NB: If a particular file representation is in memory, subclasses should
always return ``False`` -- there's no good reason to read from memory in
chunks.
"""
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
return self.size > chunk_size
def __iter__(self):
# Iterate over this file-like object by newlines
buffer_ = None
for chunk in self.chunks():
for line in chunk.splitlines(True):
if buffer_:
if endswith_cr(buffer_) and not equals_lf(line):
# Line split after a \r newline; yield buffer_.
yield buffer_
# Continue with line.
else:
# Line either split without a newline (line
# continues after buffer_) or with \r\n
# newline (line == b'\n').
line = buffer_ + line
# buffer_ handled, clear it.
buffer_ = None
# If this is the end of a \n or \r\n line, yield.
if endswith_lf(line):
yield line
else:
buffer_ = line
if buffer_ is not None:
yield buffer_
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def open(self, mode=None):
if not self.closed:
self.seek(0)
elif self.name and os.path.exists(self.name):
self.file = open(self.name, mode or self.mode)
else:
raise ValueError("The file cannot be reopened.")
def close(self):
self.file.close()
@python_2_unicode_compatible
class ContentFile(File):
"""
A File-like object that takes just raw content, rather than an actual file.
"""
def __init__(self, content, name=None):
if six.PY3:
stream_class = StringIO if isinstance(content, six.text_type) else BytesIO
else:
stream_class = BytesIO
content = force_bytes(content)
super(ContentFile, self).__init__(stream_class(content), name=name)
self.size = len(content)
def __str__(self):
return 'Raw content'
def __bool__(self):
return True
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def open(self, mode=None):
self.seek(0)
def close(self):
pass
def endswith_cr(line):
"""
Return True if line (a text or byte string) ends with '\r'.
"""
return line.endswith('\r' if isinstance(line, six.text_type) else b'\r')
def endswith_lf(line):
"""
Return True if line (a text or byte string) ends with '\n'.
"""
return line.endswith('\n' if isinstance(line, six.text_type) else b'\n')
def equals_lf(line):
"""
Return True if line (a text or byte string) equals '\n'.
"""
return line == ('\n' if isinstance(line, six.text_type) else b'\n')
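# A minimal usage sketch of ``ContentFile`` (Python 3 assumed):
#   f = ContentFile(b"line one\nline two\n", name="notes.txt")
#   f.size                            # 18
#   list(f)                           # [b'line one\n', b'line two\n']
#   b"".join(f.chunks(chunk_size=4))  # b'line one\nline two\n'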
| gpl-2.0 |
onitake/ansible | lib/ansible/modules/network/f5/bigiq_application_https_offload.py | 8 | 32165 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigiq_application_https_offload
short_description: Manages BIG-IQ HTTPS offload applications
description:
- Manages BIG-IQ applications used for load balancing an HTTPS application on
port 443 with SSL offloading on BIG-IP.
version_added: 2.6
options:
name:
description:
- Name of the new application.
required: True
description:
description:
- Description of the application.
servers:
description:
- A list of servers that the application is hosted on.
      - If you are familiar with other BIG-IP settings, you might also refer to this
list as the list of pool members.
- When creating a new application, at least one server is required.
suboptions:
address:
description:
- The IP address of the server.
port:
description:
- The port of the server.
default: 80
inbound_virtual:
description:
- Settings to configure the virtual which will receive the inbound connection.
- This virtual will be used to host the HTTPS endpoint of the application.
- Traffic destined to the C(redirect_virtual) will be offloaded to this
        parameter to ensure that proper redirection from insecure to secure occurs.
suboptions:
address:
description:
- Specifies destination IP address information to which the virtual server
sends traffic.
- This parameter is required when creating a new application.
netmask:
description:
- Specifies the netmask to associate with the given C(address).
- This parameter is required when creating a new application.
port:
description:
- The port that the virtual listens for connections on.
- When creating a new application, if this parameter is not specified, the
default value of C(443) will be used.
default: 443
redirect_virtual:
description:
- Settings to configure the virtual which will receive the connection to be
redirected.
- This virtual will be used to host the HTTP endpoint of the application.
- Traffic destined to this parameter will be offloaded to the
        C(inbound_virtual) parameter to ensure that proper redirection from
        insecure to secure occurs.
suboptions:
address:
description:
- Specifies destination IP address information to which the virtual server
sends traffic.
- This parameter is required when creating a new application.
netmask:
description:
- Specifies the netmask to associate with the given C(address).
- This parameter is required when creating a new application.
port:
description:
- The port that the virtual listens for connections on.
- When creating a new application, if this parameter is not specified, the
default value of C(80) will be used.
default: 80
client_ssl_profile:
description:
- Specifies the SSL profile for managing client-side SSL traffic.
suboptions:
name:
description:
          - The name of the client SSL profile to be created and used.
- When creating a new application, if this value is not specified, the
default value of C(clientssl) will be used.
cert_key_chain:
description:
- One or more certificates and keys to associate with the SSL profile.
- This option is always a list. The keys in the list dictate the details
of the client/key/chain/passphrase combination.
          - Note that BIG-IPs can have only one certificate/key pair of each key
            type. This means that you can only have one RSA, one DSA, and one
            ECDSA per profile.
          - If you attempt to assign a second RSA, DSA, or ECDSA certificate/key
            combo, the device will reject it.
- This list is a complex list that specifies a number of keys.
- When creating a new profile, if this parameter is not specified, the
default value of C(inherit) will be used.
suboptions:
cert:
description:
- Specifies a cert name for use.
required: True
key:
description:
- Specifies a key name.
required: True
chain:
description:
- Specifies a certificate chain that is relevant to the certificate and
key mentioned earlier.
- This key is optional.
passphrase:
description:
- Contains the passphrase of the key file, should it require one.
- Passphrases are encrypted on the remote BIG-IP device.
service_environment:
description:
- Specifies the name of service environment or the hostname of the BIG-IP that
the application will be deployed to.
- When creating a new application, this parameter is required.
add_analytics:
description:
- Collects statistics of the BIG-IP that the application is deployed to.
- This parameter is only relevant when specifying a C(service_environment) which
        is a BIG-IP, not an SSG.
type: bool
default: no
state:
description:
- The state of the resource on the system.
- When C(present), guarantees that the resource exists with the provided attributes.
- When C(absent), removes the resource from the system.
default: present
choices:
- absent
- present
wait:
description:
      - Whether the module should wait for the application to be created, deleted, or updated.
type: bool
default: yes
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Load balance an HTTPS application on port 443 with SSL offloading on BIG-IP
bigiq_application_https_offload:
name: my-app
description: Redirect HTTP to HTTPS
service_environment: my-ssg
servers:
- address: 1.2.3.4
port: 8080
- address: 5.6.7.8
port: 8080
inbound_virtual:
address: 2.2.2.2
netmask: 255.255.255.255
port: 443
redirect_virtual:
address: 2.2.2.2
netmask: 255.255.255.255
port: 80
provider:
password: secret
server: lb.mydomain.com
user: admin
state: present
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the application of the resource.
returned: changed
type: string
sample: My application
service_environment:
description: The environment which the service was deployed to.
returned: changed
type: string
sample: my-ssg1
inbound_virtual_destination:
description: The destination of the virtual that was created.
returned: changed
type: string
sample: 6.7.8.9
inbound_virtual_netmask:
description: The network mask of the provided inbound destination.
returned: changed
type: string
sample: 255.255.255.0
inbound_virtual_port:
description: The port the inbound virtual address listens on.
returned: changed
type: int
sample: 80
servers:
description: List of servers, and their ports, that make up the application.
type: complex
returned: changed
contains:
address:
description: The IP address of the server.
returned: changed
type: string
sample: 2.3.4.5
port:
description: The port that the server listens on.
returned: changed
type: int
sample: 8080
sample: hash/dictionary of values
'''
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
try:
from library.module_utils.network.f5.bigiq import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigiq import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
api_map = {
'templateReference': 'template_reference',
'subPath': 'sub_path',
'ssgReference': 'ssg_reference',
'configSetName': 'config_set_name',
'defaultDeviceReference': 'default_device_reference',
'addAnalytics': 'add_analytics'
}
api_attributes = [
'resources', 'description', 'configSetName', 'subPath', 'templateReference',
'ssgReference', 'defaultDeviceReference', 'addAnalytics'
]
returnables = [
'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
'ssg_reference', 'default_device_reference', 'servers', 'inbound_virtual',
'redirect_virtual', 'client_ssl_profile', 'add_analytics'
]
updatables = [
'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
'ssg_reference', 'default_device_reference', 'servers', 'add_analytics'
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def http_profile(self):
return "profile_http"
@property
def config_set_name(self):
return self.name
@property
def sub_path(self):
return self.name
@property
def template_reference(self):
filter = "name+eq+'Default-f5-HTTPS-offload-lb-template'"
uri = "https://{0}:{1}/mgmt/cm/global/templates/?$filter={2}&$top=1&$select=selfLink".format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
            raise F5ModuleError(
                "No default HTTPS offload LB template was found."
            )
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = dict(
link=response['items'][0]['selfLink']
)
return result
@property
def default_device_reference(self):
if is_valid_ip(self.service_environment):
# An IP address was specified
filter = "address+eq+'{0}'".format(self.service_environment)
else:
# Assume a hostname was specified
filter = "hostname+eq+'{0}'".format(self.service_environment)
uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-adccore-allbigipDevices/devices/?$filter={2}&$top=1&$select=selfLink".format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
return None
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = dict(
link=response['items'][0]['selfLink']
)
return result
@property
def ssg_reference(self):
filter = "name+eq+'{0}'".format(self.service_environment)
uri = "https://{0}:{1}/mgmt/cm/cloud/service-scaling-groups/?$filter={2}&$top=1&$select=selfLink".format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
return None
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = dict(
link=response['items'][0]['selfLink']
)
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def resources(self):
result = dict()
result.update(self.http_profile)
result.update(self.http_monitor)
result.update(self.inbound_virtual_server)
result.update(self.redirect_virtual_server)
result.update(self.pool)
result.update(self.nodes)
result.update(self.ssl_profile)
return result
@property
def inbound_virtual_server(self):
result = dict()
result['ltm:virtual:7a5f7da91996'] = [
dict(
parameters=dict(
name='default_vs',
destinationAddress=self.inbound_virtual['address'],
mask=self.inbound_virtual['netmask'],
destinationPort=self.inbound_virtual['port']
),
subcollectionResources=self.inbound_profiles
)
]
return result
@property
def inbound_profiles(self):
result = {
'profiles:14c995c33411': [
dict(
parameters=dict()
)
],
'profiles:8ba4bb101701': [
dict(
parameters=dict()
)
],
'profiles:9448fe71611e': [
dict(
parameters=dict()
)
]
}
return result
@property
def redirect_virtual_server(self):
result = dict()
result['ltm:virtual:40e8c4a6f542'] = [
dict(
parameters=dict(
name='default_redirect_vs',
destinationAddress=self.redirect_virtual['address'],
mask=self.redirect_virtual['netmask'],
destinationPort=self.redirect_virtual['port']
),
subcollectionResources=self.redirect_profiles
)
]
return result
@property
def redirect_profiles(self):
result = {
'profiles:8ba4bb101701': [
dict(
parameters=dict()
)
],
'profiles:9448fe71611e': [
dict(
parameters=dict()
)
]
}
return result
@property
def pool(self):
result = dict()
result['ltm:pool:be70d46c6d73'] = [
dict(
parameters=dict(
name='pool_0'
),
subcollectionResources=self.pool_members
)
]
return result
@property
def pool_members(self):
result = dict()
result['members:dec6d24dc625'] = []
for x in self.servers:
member = dict(
parameters=dict(
port=x['port'],
nodeReference=dict(
link='#/resources/ltm:node:45391b57b104/{0}'.format(x['address']),
fullPath='# {0}'.format(x['address'])
)
)
)
result['members:dec6d24dc625'].append(member)
return result
@property
def http_profile(self):
result = dict()
result['ltm:profile:http:8ba4bb101701'] = [
dict(
parameters=dict(
name='profile_http'
)
)
]
return result
@property
def http_monitor(self):
result = dict()
result['ltm:monitor:http:fd07629373b0'] = [
dict(
parameters=dict(
name='monitor-http'
)
)
]
return result
@property
def nodes(self):
result = dict()
result['ltm:node:45391b57b104'] = []
for x in self.servers:
tmp = dict(
parameters=dict(
name=x['address'],
address=x['address']
)
)
result['ltm:node:45391b57b104'].append(tmp)
return result
@property
def node_addresses(self):
result = [x['address'] for x in self.servers]
return result
@property
def ssl_profile(self):
result = dict()
result['ltm:profile:client-ssl:14c995c33411'] = [
dict(
parameters=dict(
name='clientssl',
certKeyChain=self.cert_key_chains
)
)
]
return result
def _get_cert_references(self):
result = dict()
uri = "https://{0}:{1}/mgmt/cm/adc-core/working-config/sys/file/ssl-cert/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
for cert in response['items']:
key = fq_name(cert['partition'], cert['name'])
result[key] = cert['selfLink']
return result
def _get_key_references(self):
result = dict()
uri = "https://{0}:{1}/mgmt/cm/adc-core/working-config/sys/file/ssl-key/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
for cert in response['items']:
key = fq_name(cert['partition'], cert['name'])
result[key] = cert['selfLink']
return result
@property
def cert_key_chains(self):
result = []
if self.client_ssl_profile is None:
return None
if 'cert_key_chain' not in self.client_ssl_profile:
return None
kc = self.client_ssl_profile['cert_key_chain']
if isinstance(kc, string_types) and kc != 'inherit':
raise F5ModuleError(
"Only the 'inherit' setting is available when 'cert_key_chain' is a string."
)
if not isinstance(kc, list):
raise F5ModuleError(
"The value of 'cert_key_chain' is not one of the supported types."
)
cert_references = self._get_cert_references()
key_references = self._get_key_references()
for idx, x in enumerate(kc):
tmp = dict(
name='clientssl{0}'.format(idx)
)
if 'cert' not in x:
                raise F5ModuleError(
                    "A 'cert' option is required when specifying the 'cert_key_chain' parameter."
                )
elif x['cert'] not in cert_references:
raise F5ModuleError(
"The specified 'cert' was not found. Did you specify its full path?"
)
else:
key = x['cert']
tmp['certReference'] = dict(
link=cert_references[key],
fullPath=key
)
if 'key' not in x:
                raise F5ModuleError(
                    "A 'key' option is required when specifying the 'cert_key_chain' parameter."
                )
elif x['key'] not in key_references:
raise F5ModuleError(
"The specified 'key' was not found. Did you specify its full path?"
)
else:
key = x['key']
tmp['keyReference'] = dict(
link=key_references[key],
fullPath=key
)
            if 'chain' in x:
                if x['chain'] not in cert_references:
                    raise F5ModuleError(
                        "The specified 'chain' was not found. Did you specify its full path?"
                    )
                key = x['chain']
                tmp['chainReference'] = dict(
                    link=cert_references[key],
                    fullPath=key
                )
if 'passphrase' in x:
tmp['passphrase'] = x['passphrase']
result.append(tmp)
return result
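# A sketch (values hypothetical) of the ``client_ssl_profile`` input that
# ``cert_key_chains`` above accepts; ``cert``, ``key``, and ``chain`` must be
# full paths already known to the BIG-IQ:
#   client_ssl_profile = dict(
#       name='clientssl',
#       cert_key_chain=[
#           dict(cert='/Common/site.crt', key='/Common/site.key',
#                chain='/Common/ca-bundle.crt', passphrase='secret')
#       ]
#   )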
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.want.client = self.client
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
self.changes.client = self.client
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
self.changes.client = self.client
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return False
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/ap/query/v1/tenants/default/reports/AllApplicationsList?$filter=name+eq+'{2}'".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and 'result' in response and 'totalItems' in response['result'] and response['result']['totalItems'] == 0:
return False
return True
def remove(self):
if self.module.check_mode:
return True
self_link = self.remove_from_device()
if self.want.wait:
self.wait_for_apply_template_task(self_link)
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def has_no_service_environment(self):
if self.want.default_device_reference is None and self.want.ssg_reference is None:
return True
return False
def create(self):
if self.want.service_environment is None:
raise F5ModuleError(
"A 'service_environment' must be specified when creating a new application."
)
if self.want.servers is None:
raise F5ModuleError(
"At least one 'servers' item is needed when creating a new application."
)
if self.want.inbound_virtual is None:
raise F5ModuleError(
"An 'inbound_virtual' must be specified when creating a new application."
)
self._set_changed_options()
if self.has_no_service_environment():
raise F5ModuleError(
"The specified 'service_environment' ({0}) was not found.".format(self.want.service_environment)
)
if self.module.check_mode:
return True
self_link = self.create_on_device()
if self.want.wait:
self.wait_for_apply_template_task(self_link)
if not self.exists():
raise F5ModuleError(
"Failed to deploy application."
)
return True
def create_on_device(self):
params = self.changes.api_params()
params['mode'] = 'CREATE'
uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
return response['selfLink']
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
params = dict(
configSetName=self.want.name,
mode='DELETE'
)
uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
return response['selfLink']
def wait_for_apply_template_task(self, self_link):
host = 'https://{0}:{1}'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
uri = self_link.replace('https://localhost', host)
while True:
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if response['status'] == 'FINISHED' and response.get('currentStep', None) == 'DONE':
return True
elif 'errorMessage' in response:
raise F5ModuleError(response['errorMessage'])
time.sleep(5)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
description=dict(),
servers=dict(
type='list',
options=dict(
address=dict(required=True),
port=dict(default=80)
)
),
inbound_virtual=dict(
type='dict',
options=dict(
address=dict(required=True),
netmask=dict(required=True),
port=dict(default=443)
)
),
redirect_virtual=dict(
type='dict',
options=dict(
address=dict(required=True),
netmask=dict(required=True),
port=dict(default=80)
)
),
service_environment=dict(),
state=dict(
default='present',
choices=['present', 'absent']
),
client_ssl_profile=dict(
type='dict',
name=dict(default='clientssl'),
cert_key_chain=dict(
type='raw',
options=dict(
cert=dict(),
key=dict(),
chain=dict(),
passphrase=dict()
)
)
),
add_analytics=dict(type='bool', default='no'),
wait=dict(type='bool', default='yes')
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['inherit_cert_key_chain', 'cert_key_chain']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
exit_json(module, results, client)
except F5ModuleError as ex:
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
andyfaff/scipy | scipy/stats/_stats_mstats_common.py | 12 | 16438 | import numpy as np
import scipy.stats.stats
from . import distributions
from .._lib._bunch import _make_tuple_bunch
__all__ = ['_find_repeats', 'linregress', 'theilslopes', 'siegelslopes']
# This is not a namedtuple for backwards compatibility. See PR #12983
LinregressResult = _make_tuple_bunch('LinregressResult',
['slope', 'intercept', 'rvalue',
'pvalue', 'stderr'],
extra_field_names=['intercept_stderr'])
def linregress(x, y=None, alternative='two-sided'):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length. If
only `x` is given (and ``y=None``), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension. In
the case where ``y=None`` and `x` is a 2x2 array, ``linregress(x)`` is
equivalent to ``linregress(x[0], x[1])``.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the slope of the regression line is nonzero
* 'less': the slope of the regression line is less than zero
* 'greater': the slope of the regression line is greater than zero
.. versionadded:: 1.7.0
Returns
-------
result : ``LinregressResult`` instance
The return value is an object with the following attributes:
slope : float
Slope of the regression line.
intercept : float
Intercept of the regression line.
rvalue : float
Correlation coefficient.
pvalue : float
The p-value for a hypothesis test whose null hypothesis is
that the slope is zero, using Wald Test with t-distribution of
the test statistic. See `alternative` above for alternative
hypotheses.
stderr : float
Standard error of the estimated slope (gradient), under the
assumption of residual normality.
intercept_stderr : float
Standard error of the estimated intercept, under the assumption
of residual normality.
See Also
--------
scipy.optimize.curve_fit :
Use non-linear least squares to fit a function to data.
scipy.optimize.leastsq :
Minimize the sum of squares of a set of equations.
Notes
-----
Missing values are considered pair-wise: if a value is missing in `x`,
the corresponding value in `y` is masked.
For compatibility with older versions of SciPy, the return value acts
like a ``namedtuple`` of length 5, with fields ``slope``, ``intercept``,
``rvalue``, ``pvalue`` and ``stderr``, so one can continue to write::
slope, intercept, r, p, se = linregress(x, y)
With that style, however, the standard error of the intercept is not
available. To have access to all the computed values, including the
standard error of the intercept, use the return value as an object
with attributes, e.g.::
result = linregress(x, y)
print(result.intercept, result.intercept_stderr)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> rng = np.random.default_rng()
Generate some data:
>>> x = rng.random(10)
>>> y = 1.6*x + rng.random(10)
Perform the linear regression:
>>> res = stats.linregress(x, y)
Coefficient of determination (R-squared):
>>> print(f"R-squared: {res.rvalue**2:.6f}")
R-squared: 0.717533
Plot the data along with the fitted line:
>>> plt.plot(x, y, 'o', label='original data')
>>> plt.plot(x, res.intercept + res.slope*x, 'r', label='fitted line')
>>> plt.legend()
>>> plt.show()
Calculate 95% confidence interval on slope and intercept:
>>> # Two-sided inverse Students t-distribution
>>> # p - probability, df - degrees of freedom
>>> from scipy.stats import t
>>> tinv = lambda p, df: abs(t.ppf(p/2, df))
>>> ts = tinv(0.05, len(x)-2)
>>> print(f"slope (95%): {res.slope:.6f} +/- {ts*res.stderr:.6f}")
slope (95%): 1.453392 +/- 0.743465
>>> print(f"intercept (95%): {res.intercept:.6f}"
... f" +/- {ts*res.intercept_stderr:.6f}")
intercept (95%): 0.616950 +/- 0.544475
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
raise ValueError("If only `x` is given as input, it has to "
"be of shape (2, N) or (N, 2); provided shape "
f"was {x.shape}.")
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# Average sums of square differences from the mean
# ssxm = mean( (x-mean(x))^2 )
# ssxym = mean( (x-mean(x)) * (y-mean(y)) )
ssxm, ssxym, _, ssym = np.cov(x, y, bias=1).flat
# R-value
# r = ssxym / sqrt( ssxm * ssym )
if ssxm == 0.0 or ssym == 0.0:
# If the denominator was going to be 0
r = 0.0
else:
r = ssxym / np.sqrt(ssxm * ssym)
# Test for numerical error propagation (make sure -1 < r < 1)
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
slope = ssxym / ssxm
intercept = ymean - slope*xmean
if n == 2:
# handle case when only two points are passed in
if y[0] == y[1]:
prob = 1.0
else:
prob = 0.0
slope_stderr = 0.0
intercept_stderr = 0.0
else:
df = n - 2 # Number of degrees of freedom
# n-2 degrees of freedom because 2 has been used up
# to estimate the mean and standard deviation
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
t, prob = scipy.stats.stats._ttest_finish(df, t, alternative)
slope_stderr = np.sqrt((1 - r**2) * ssym / ssxm / df)
# Also calculate the standard error of the intercept
# The following relationship is used:
# ssxm = mean( (x-mean(x))^2 )
# = ssx - sx*sx
# = mean( x^2 ) - mean(x)^2
intercept_stderr = slope_stderr * np.sqrt(ssxm + xmean**2)
return LinregressResult(slope=slope, intercept=intercept, rvalue=r,
pvalue=prob, stderr=slope_stderr,
intercept_stderr=intercept_stderr)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
See also
--------
siegelslopes : a similar technique using repeated medians
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on
Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.J. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, p. 493.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" %
(len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
sum(k * (k-1) * (2*k + 5) for k in nxreps) -
sum(k * (k-1) * (2*k + 5) for k in nyreps))
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
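# z is negative here (alpha was folded below 0.5 above), so Ru lands above
# the midpoint of the sorted `slopes` and Rl below it; slopes[Rl] and
# slopes[Ru] bound the confidence interval on the slope.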
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
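# Illustrative behavior (example values are ours, not from the original):
#   _find_repeats([1., 2., 2., 3., 3., 3.]) -> (array([2., 3.]), array([2, 3]))
# i.e. the distinct values occurring more than once, and their frequencies.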
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
def siegelslopes(y, x=None, method="hierarchical"):
r"""
Computes the Siegel estimator for a set of points (x, y).
`siegelslopes` implements a method for robust linear regression
using repeated medians (see [1]_) to fit a line to the points (x, y).
The method is robust to outliers with an asymptotic breakdown point
of 50%.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
method : {'hierarchical', 'separate'}
If 'hierarchical', estimate the intercept using the estimated
slope ``medslope`` (default option).
If 'separate', estimate the intercept independent of the estimated
slope. See Notes for details.
Returns
-------
medslope : float
Estimate of the slope of the regression line.
medintercept : float
Estimate of the intercept of the regression line.
See Also
--------
theilslopes : a similar technique without repeated medians
Notes
-----
With ``n = len(y)``, compute ``m_j`` as the median of
the slopes from the point ``(x[j], y[j])`` to all other `n-1` points.
``medslope`` is then the median of all slopes ``m_j``.
Two ways are given to estimate the intercept in [1]_ which can be chosen
via the parameter ``method``.
The hierarchical approach uses the estimated slope ``medslope``
and computes ``medintercept`` as the median of ``y - medslope*x``.
The other approach estimates the intercept separately as follows: for
each point ``(x[j], y[j])``, compute the intercepts of all the `n-1`
lines through the remaining points and take the median ``i_j``.
``medintercept`` is the median of the ``i_j``.
The implementation computes `n` times the median of a vector of size `n`,
which can be slow for large vectors. There are more efficient algorithms
(see [2]_) which are not implemented here.
References
----------
.. [1] A. Siegel, "Robust Regression Using Repeated Medians",
Biometrika, Vol. 69, pp. 242-244, 1982.
.. [2] A. Stein and M. Werman, "Finding the repeated median regression
line", Proceedings of the Third Annual ACM-SIAM Symposium on
Discrete Algorithms, pp. 409-413, 1992.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope and intercept. For comparison, also compute the
least-squares fit with `linregress`:
>>> res = stats.siegelslopes(y, x)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Siegel regression line is shown in red. The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
if method not in ['hierarchical', 'separate']:
raise ValueError("method can only be 'hierarchical' or 'separate'")
y = np.asarray(y).ravel()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.asarray(x, dtype=float).ravel()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" %
(len(y), len(x)))
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes, intercepts = [], []
for j in range(len(x)):
id_nonzero = deltax[j, :] != 0
slopes_j = deltay[j, id_nonzero] / deltax[j, id_nonzero]
medslope_j = np.median(slopes_j)
slopes.append(medslope_j)
if method == 'separate':
z = y*x[j] - y[j]*x
medintercept_j = np.median(z[id_nonzero] / deltax[j, id_nonzero])
intercepts.append(medintercept_j)
medslope = np.median(np.asarray(slopes))
if method == "separate":
medinter = np.median(np.asarray(intercepts))
else:
medinter = np.median(y - medslope*x)
return medslope, medinter
| bsd-3-clause |
hkailee/FluSeq | ms2ContLearning.py | 1 | 12630 | #!/usr/bin/env python3.4
__author__ = 'mdc_hk'
version = '1.0'
#===========================================================================================================
import datetime, logging, multiprocessing, os, re, subprocess, sys, time
import pandas as pd
from pandas import Series
from bs4 import BeautifulSoup
#===========================================================================================================
# Functions:
# 1: Checks that the proper number of arguments is passed; gives instructions on proper use.
def argsCheck(numArgs):
if len(sys.argv) != numArgs:
print('Learns the contamination rate from the MS2 system control.')
print('Usage:', sys.argv[0], '<FolderInput>', '<DateOfRunInYYMMDD>')
print('Examples:', sys.argv[0], 'FolderMS2_001', '151225')
exit(1) # Aborts program. (exit(1) indicates that an error occurred)
#===========================================================================================================
# Housekeeping.
argsCheck(3) # Checks if the number of arguments are correct.
# Stores file one for input checking.
inFolder = sys.argv[1]
dateOfRun = sys.argv[2]
# Setting up working and fluSeq directories...
workingFolder_tmp = '~/FluSeq/' + inFolder
os.chdir(os.path.expanduser(workingFolder_tmp))
workingFolder = os.getcwd()
fluSeqFolder = os.path.expanduser('~/FluSeq/')
# Logging events...
logging.basicConfig(filename=workingFolder + '/Log.txt', level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
startTime = time.time()
logging.info('Command-line invocation: ' + sys.argv[0] + ' ' + sys.argv[1])
logging.info('Runfolder path: ' + workingFolder)
# Determining the number of logical CPUs available on this PC...
numberOfProcessors = multiprocessing.cpu_count()
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- The program will be optimized to use '
+ str(numberOfProcessors) + ' logical CPUs available in your PC.')
logging.info('Detected logical CPUs: ' + str(numberOfProcessors))
# Finding the number of unique samples in the working folder...
workingFilesR1 = [f for f in os.listdir(workingFolder) if re.match(r'[\S]+S\d+_L001_R1_001\.fastq\.gz', f)]
fastqFileNameR1 = re.compile(r'(([\S]+)_S\d+_L001_R)1_001\.fastq\.gz')
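# e.g. a hypothetical file 'SampleA_S1_L001_R1_001.fastq.gz' matches as
# [('SampleA_S1_L001_R', 'SampleA')]: the read-pair prefix and the sample name.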
fastqFilesR1 = []
for file in workingFilesR1:
fastqFilesR1.append(fastqFileNameR1.findall(file))
# Starting the analyses...
for file in fastqFilesR1:
# bwa aln...
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Performing bwa aln for paired-end reads '
'of sample ' + file[0][1])
proc1 = subprocess.Popen(['bwa', 'aln', '-t', str(numberOfProcessors), '-q', '15', '-f',
workingFolder + '/' + file[0][0] + '1_001.sai',
fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa',
workingFolder + '/' + file[0][0]+'1_001.fastq.gz'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc1.communicate()
logging.info('bwa aln -t ' + str(numberOfProcessors) + ' -q 15 -f ' + workingFolder + '/'
+ file[0][0] + '1_001.sai ' + fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa '
+ workingFolder + '/' + file[0][0]+'1_001.fastq.gz')
proc2 = subprocess.Popen(['bwa', 'aln', '-t', str(numberOfProcessors), '-q', '15',
'-f', workingFolder + '/' + file[0][0] + '2_001.sai',
fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa',
workingFolder + '/' + file[0][0]+'2_001.fastq.gz'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc2.communicate()
logging.info('bwa aln -t ' + str(numberOfProcessors) + ' -q 15 -f ' + workingFolder + '/'
+ file[0][0] + '2_001.sai ' + fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa '
+ workingFolder + '/' + file[0][0]+'2_001.fastq.gz')
# bwa sampe...
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Performing bwa sampe for sample ' +
file[0][1])
proc3 = subprocess.Popen(['bwa', 'sampe', '-r' + '@RG\tID:'+file[0][1]+'\tPL:ILLUMINA\tSM:'+file[0][1],
'-f', workingFolder + '/' + file[0][1] + '.uncompressed.bam',
fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa',
workingFolder + '/' + file[0][0] + '1_001.sai',
workingFolder + '/' + file[0][0] + '2_001.sai',
workingFolder + '/' + file[0][0] + '1_001.fastq.gz',
workingFolder + '/' + file[0][0] + '2_001.fastq.gz'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc3.communicate()
logging.info('bwa sampe -r @RG\tID:'+file[0][1]+'\tPL:ILLUMINA\tSM:'+file[0][1] +
' -f '+ workingFolder + '/' + file[0][1] + '.uncompressed.bam ' +
fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa ' +
workingFolder + '/' + file[0][0] + '1_001.sai ' +
workingFolder + '/' + file[0][0] + '2_001.sai ' +
workingFolder + '/' + file[0][0] + '1_001.fastq.gz ' +
workingFolder + '/' + file[0][0] + '2_001.fastq.gz')
# Performing bam sorting using samtools...
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Performing bam sorting for sample '
+ file[0][1] + ' using samtools sort module')
proc4 = subprocess.Popen(['samtools', 'sort', workingFolder + '/' + file[0][1] +
'.uncompressed.bam', workingFolder + '/' + file[0][1]],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc4.communicate()
logging.info('samtools sort ' + workingFolder + '/' + file[0][1] +
'.uncompressed.bam ' + workingFolder + '/' + file[0][1])
# Performing bam indexing using samtools...
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Performing samtools indexing for sample '
+ file[0][1] + ' using samtools index module')
proc5 = subprocess.Popen(['samtools', 'index', workingFolder + '/' + file[0][1]+'.bam'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc5.communicate()
logging.info('samtools index ' + workingFolder + '/' + file[0][1]+'.bam')
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Analysing Depth of Coverage for each '
'gene segment of sample ' + file[0][1] + ' using GATK DepthOfCoverage')
proc6 = subprocess.Popen(['java', '-Xmx4g', '-jar', '/home/hklee/Software/GenomeAnalysisTK.jar', '-T',
'DepthOfCoverage', '-R', fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa',
'-o', workingFolder + '/' + 'MS2SysCtrl_base',
'-I', workingFolder + '/' + file[0][1]+'.bam', '-omitIntervals',
'-omitSampleSummary'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc6.communicate()
logging.info('java -Xmx4g -jar /home/hklee/Software/GenomeAnalysisTK.jar -T DepthOfCoverage -R'
+ fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa -o ' + workingFolder + '/'
+ 'MS2SysCtrl_base -I ' + workingFolder + '/' + file[0][1]
+ '.bam -omitIntervals -omitSampleSummary')
# Housekeeping...
os.unlink(workingFolder + '/' + file[0][0] + '1_001.sai')
os.unlink(workingFolder + '/' + file[0][0] + '2_001.sai')
os.unlink(workingFolder + '/' + file[0][1] + '.uncompressed.bam')
os.unlink(workingFolder + '/' + file[0][1]+'.bam')
os.unlink(workingFolder + '/' + file[0][1]+'.bam.bai')
dataFromTable = pd.read_table(workingFolder + '/MS2SysCtrl_base', sep='\t')
columns_tojoin = dataFromTable[('Locus')].str.split(":").apply(Series, 1)
columns_tojoin.columns = ['CHROM', 'POS']
dataFromTable = pd.merge(dataFromTable, columns_tojoin, left_index=True, right_index=True)
dataFromTable = dataFromTable.set_index(['CHROM'], drop=False)
try:
bsObj = BeautifulSoup(open(workingFolder + '/ResequencingRunStatistics.xml'), 'lxml-xml')
except IOError:
errorFile = open('Error.txt', 'a')
errorFile.write('Please make sure the run-specific ResequencingRunStatistics.xml is placed in the run folder'
+ '\n')
errorFile.close()
exit(1)
xmlOfSamples = bsObj.findAll('SummarizedSampleStatistics')
if not xmlOfSamples:
xmlOfSamples = bsObj.findAll('SummarizedSampleStatisics')
listOfSamples = []
for name in xmlOfSamples:
listOfSamples.append(name.SampleName.get_text())
listOfPFReads = []
for name in xmlOfSamples:
listOfPFReads.append(round(int(name.NumberOfClustersPF.get_text())/1000000, 2))
if len(listOfSamples) == len(listOfPFReads):
dictOfSamplesAndPFreads = dict(zip(listOfSamples, listOfPFReads))
geneList = []
def geneAnalysis(Chrom, Gene):
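# Collects per-gene coverage rows, sampled every 150 bases along the segment,
# into the module-level geneList; each row carries the run date, the MS2
# system control's PF reads (in millions) and the gene segment name.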
GeneData = dataFromTable.ix[Chrom]
GeneData.is_copy = False
GeneData['PFreads_inMillion'] = dictOfSamplesAndPFreads['MS2SysCtrl']
GeneData['DateOfRun'] = dateOfRun
GeneData['GeneSegment'] = Gene
GeneData = GeneData.set_index(['POS'], drop=False)
geneSeq = list(dictOfGeneSegment[Chrom])
for k in range(0, len(geneSeq), 150):
GeneData_a = GeneData.ix[[k],['DateOfRun', 'CHROM', 'GeneSegment', 'POS', 'Total_Depth', 'PFreads_inMillion']]
geneList.append(GeneData_a)
return
with open(fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa' ,'r') as refFile:
content1 = refFile.read().splitlines()
geneName = []
for i in range(len(content1)):
if i % 2 == 0:
geneName.append(content1[i][1:])
geneCont = []
for i in range(len(content1)):
if i % 2 != 0:
geneCont.append(content1[i])
if len(geneName) == len(geneCont):
dictOfGeneSegment = dict(zip(geneName, geneCont))
with open(fluSeqFolder + 'GENOMERepository/H3N2Genome_annotated.fa','r') as refFile:
sequences = refFile.read()
segmentRegex = re.compile(r'((Segment\d)_[\S]*)')
segment = segmentRegex.findall(sequences)
for seg in segment:
try:
geneAnalysis(seg[0], seg[1])
except:
errorFile = open('Error.txt', 'a')
errorFile.write(seg[1] + ' of MS2 System Control was not analysed. Suggested solution: Something is wrong '
'with the formatting of the AllGenes.xls file' + '\n')
errorFile.close()
try:
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- Updating existing ms2SysCtrl.db')
dataBase = pd.read_csv(fluSeqFolder + 'ms2SysCtrl.db', sep=',')
dataBase_tmp = pd.concat(geneList, axis=0)
with open(fluSeqFolder + 'ms2SysCtrl.db', 'a') as f:
dataBase_tmp.to_csv(f, header=False)
except OSError:
print('Warning: The MS2 contamination database file is not found in the designated directory. '
'A new database will be created. Please note that it is important to have sufficient data in the database '
'to provide confident contamination statistics for data analyses')
errorFile = open('Error.txt', 'a')
errorFile.write('Warning: The MS2 contamination database file is not found in the designated directory. A new '
'database will be created. Please note that it is important to have sufficient data in the '
'database to provide confident contamination statistics for data analyses.\n')
errorFile.close()
pd.concat(geneList, axis=0).to_csv(fluSeqFolder + 'ms2SysCtrl.db')
# Housekeeping...
try:
os.unlink(workingFolder + '/MS2SysCtrl_base.sample_cumulative_coverage_counts')
except OSError:
print(workingFolder + '/MS2SysCtrl_base.sample_cumulative_coverage_counts is not found')
try:
os.unlink(workingFolder + '/MS2SysCtrl_base.sample_cumulative_coverage_proportions')
except OSError:
print(workingFolder + '/MS2SysCtrl_base.sample_cumulative_coverage_proportions is not found')
print('>> '+ datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'), '- MS2 System contamination learning done ')
| mit |
cmvac/demagorgon.repository | plugin.video.foradabox/playtvfr.py | 297 | 8750 | import struct
import urllib2,urllib
import re
import json
import math
CRYPT_XXTEA_DELTA= 0x9E3779B9
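# 0x9E3779B9 is the standard TEA/XXTEA key-schedule constant,
# floor(2**32 / golden ratio).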
headers = [('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),( 'Connection','Keep-Alive')]
class Crypt_XXTEA:
_key=None
def setKey(self,key):
if isinstance(key, basestring):
k = self._str2long(key, False);
elif isinstance(key, list):
k = key;
else:
print "The secret key must be a string or long integer array"
if (len(k) > 4):
print "The secret key cannot be more than 16 characters or 4 long values"
elif (len(k) == 0):
print "The secret key cannot be empty"
elif (len(k) < 4):
for i in range(len(k),4):
k.append(0)
#k[i] = 0;
#print k
self._key = k;
def encrypt(self,plaintext):
if (self._key == None):
print "Secret key is undefined"
if isinstance(plaintext, basestring):
return self._encryptString(plaintext)
elif isinstance(plaintext, list):
return self._encryptArray(plaintext)
else:
print "The plain text must be a string or long integer array"
def decrypt(self,ciphertext):
if (self._key == None):
print "Secret key is undefined"
#print 'dec',isinstance(ciphertext, basestring)
if isinstance(ciphertext, basestring):
return self._decryptString(ciphertext)
elif isinstance(ciphertext, list):
return self._decryptArray(ciphertext)
else:
print "The plain text must be a string or long integer array"
def _encryptString(self,str):
if (str == ''):
return ''
v = self._str2long(str, False);
v = self._encryptArray(v);
return self._long2str(v, False);
def _encryptArray(self,v):
n = len(v) - 1;
z = v[n];
y = v[0];
q = math.floor(6 + 52 / (n + 1));
sum = 0;
while (0 < q):
q-=1
sum = self._int32(sum + CRYPT_XXTEA_DELTA);
e = sum >> 2 & 3;
for p in range(0,n):
y = v[p + 1];
mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
z = v[p] = self._int32(v[p] + mx);
p+=1#due to range
y = v[0];
mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
z = v[n] = self._int32(v[n] + mx);
return v;
def _decryptString(self,str):
if (str == ''):
return '';
v = self._str2long(str, False);
v = self._decryptArray(v);
return self._long2str(v, False);
def _decryptArray(self,v):
n = len(v) - 1;
z = v[n];
y = v[0];
q = math.floor(6 + 52 / (n + 1));
sum = self._int32(q * CRYPT_XXTEA_DELTA);
while (sum != 0):
e = sum >> 2 & 3;
for p in range( n, 0, -1):
z = v[p - 1];
mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
y = v[p] = self._int32(v[p] - mx);
p=p-1 #due to range
z = v[n];
mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
y = v[0] = self._int32(v[0] - mx);
sum = self._int32(sum - CRYPT_XXTEA_DELTA);
return v;
def _long2str(self,v, w):
ln = len(v);
s = '';
for i in range(0,ln):
s += struct.pack('<I', v[i]&0xFFFFFFFF);
if (w):
return s[0:v[ln - 1]];
else:
return s;
def _str2long(self,s, w):
#return (s + ("\0" *( (4 - len(s) % 4) & 3))).encode("hex")
i=int(math.ceil((len(s)/4)))
if (len(s)%4)>0 :
i+=1
#print struct.unpack('<I',(s + ("\0" *( (4 - len(s) % 4) & 3))))
v = list(struct.unpack(('I'*i),(s + ("\0" *( (4 - len(s) % 4) & 3)))))
if (w):
v[0] = len(s); #prb
return v;
def _int32(self,n):
while (n >= 2147483648):
n -= 4294967296;
while (n <= -2147483649):
n += 4294967296;
return int(n);
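# Minimal round-trip sketch for Crypt_XXTEA (ours, not from the original
# source); the key "object" and the trailing-NUL strip mirror get_url() below:
#   xxtea = Crypt_XXTEA()
#   xxtea.setKey("object")
#   assert xxtea.decrypt(xxtea.encrypt("hello world")).rstrip('\0') == "hello world"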
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None):
cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
#opener = urllib2.install_opener(opener)
req = urllib2.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
if headers:
for h,hv in headers:
req.add_header(h,hv)
response = opener.open(req,post,timeout=timeout)
link=response.read()
response.close()
return link;
def HexToByte( hexStr ):
"""
Convert a string of hex byte values into a byte string. The hex byte values may
or may not be space separated.
"""
# The list comprehension implementation is fractionally slower in this case
#
# hexStr = ''.join( hexStr.split(" ") )
# return ''.join( ["%c" % chr( int ( hexStr[i:i+2],16 ) ) \
# for i in range(0, len( hexStr ), 2) ] )
bytes = []
hexStr = ''.join( hexStr.split(" ") )
for i in range(0, len(hexStr), 2):
bytes.append( chr( int (hexStr[i:i+2], 16 ) ) )
return ''.join( bytes )
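# e.g. HexToByte("48 65 6c 6c 6f") and HexToByte("48656c6c6f") both return 'Hello'.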
def get_url(player_id):
v=Crypt_XXTEA()
import time
# Retrieve channel id and primary key
timestamp = time.time();
#player_id = '69T7MabZ47';
init = getUrl("http://tvplayer.playtv.fr/js/"+player_id+".js?_="+str(timestamp),headers=headers);
#print init
pat="b:(\{\"a.*\"})}"
init =re.compile(pat).findall(init)[0]
init = json.loads(init);
from binascii import unhexlify
from binascii import hexlify
a = init['a'];
b = init['b'];
b=b.decode("hex")
a=a.decode("hex")
bb=""
v.setKey("object");
#b=v._long2str(b,False)
b_s=v.decrypt(b).rstrip('\0')
params = json.loads(b_s)
pack_k=params['k'].decode("hex")# pack("H*", params['k'])#init['a']
key = v.decrypt(pack_k).rstrip('\0');
v.setKey(key);
a_d=v.decrypt(a).rstrip('\0')
params = json.loads(a_d);
channel_id = params['i'];
api_url = params['u'];
req={"i": channel_id, "t": timestamp,"h":"playtv.fr","a":5}
req = json.dumps(req)
req_en=v.encrypt(req)
req_en=req_en.encode("hex");# struct.unpack("H"*(len(req_en)/4),req_en);
if not req_en.endswith( '/'):
req_en += '/';
headers2 = headers + [('Referer', 'http://static.playtv.fr/swf/tvplayer.swf?r=22'), ('x-flash-version', '11,6,602,180')]
init = getUrl(api_url+req_en,headers=headers2);
init=init.decode("hex")
params = json.loads(v.decrypt(init).rstrip('\0'));
if params['s'][1] and params['s'][1] != '':
streams =params['s'][0] if params['s'][0]['bitrate'] > params['s'][1]['bitrate'] else params['s'][1];
else:
streams = params['s'][0];
scheme = streams['scheme'];
host = streams['host'];
port = streams['port'];
app = streams['application'];
playpath = streams['stream'];
token = streams['token'];
title = streams['title'];
t = params['j']['t'];
k = params['j']['k'];
v.setKey("object");
key=v.decrypt(k.decode("hex"))# pack("H*", k));
v.setKey(key);
auth = v.encrypt(t).encode("hex") #unpack("H*", $xxtea->encrypt($t));
if (scheme == "http"):
final_url = scheme+"://"+host + ( ":" +port if port and len(port)>0 else "") + "/" + playpath
else:
final_url = scheme + "://" + host +( ":" +port if port and len(port)>0 else "") + "/" + app +" app=" + app +" swfUrl=http://static.playtv.fr/swf/tvplayer.swf pageUrl=http://playtv.fr/television Conn=S:" + auth + (" token=" + token if token and len(token)>0 else "") + " playpath=" + playpath +' live=1 timeout=20'
print final_url
return final_url
#print get_url('69T7MabZ47')
| gpl-2.0 |
analyseuc3m/ANALYSE-v1 | lms/djangoapps/teams/views.py | 29 | 52321 | """HTTP endpoints for the Teams API."""
import logging
from django.shortcuts import get_object_or_404, render_to_response
from django.http import Http404
from django.conf import settings
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication
from rest_framework_oauth.authentication import OAuth2Authentication
from rest_framework import status
from rest_framework import permissions
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
from django_countries import countries
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from openedx.core.lib.api.parsers import MergePatchParser
from openedx.core.lib.api.permissions import IsStaffOrReadOnly
from openedx.core.lib.api.view_utils import (
RetrievePatchAPIView,
add_serializer_errors,
build_api_error,
ExpandableFieldViewMixin
)
from openedx.core.lib.api.paginators import paginate_search_results, DefaultPagination
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_with_access, has_access
from student.models import CourseEnrollment, CourseAccessRole
from student.roles import CourseStaffRole
from django_comment_client.utils import has_discussion_privileges
from util.model_utils import truncate_fields
from . import is_feature_enabled
from lms.djangoapps.teams.models import CourseTeam, CourseTeamMembership
from .serializers import (
CourseTeamSerializer,
CourseTeamCreationSerializer,
TopicSerializer,
BulkTeamCountTopicSerializer,
MembershipSerializer,
add_team_count
)
from .search_indexes import CourseTeamIndexer
from .errors import AlreadyOnTeamInCourse, ElasticSearchConnectionError, NotEnrolledInCourseForTeam
from .utils import emit_team_event
TEAM_MEMBERSHIPS_PER_PAGE = 2
TOPICS_PER_PAGE = 12
MAXIMUM_SEARCH_SIZE = 100000
log = logging.getLogger(__name__)
@receiver(post_save, sender=CourseTeam)
def team_post_save_callback(sender, instance, **kwargs): # pylint: disable=unused-argument
""" Emits signal after the team is saved. """
changed_fields = instance.field_tracker.changed()
# Don't emit events when we are first creating the team.
if not kwargs['created']:
for field in changed_fields:
if field not in instance.FIELD_BLACKLIST:
truncated_fields = truncate_fields(unicode(changed_fields[field]), unicode(getattr(instance, field)))
truncated_fields['team_id'] = instance.team_id
truncated_fields['field'] = field
emit_team_event(
'edx.team.changed',
instance.course_id,
truncated_fields
)
class TeamAPIPagination(DefaultPagination):
"""
Pagination format used by the teams API.
"""
page_size_query_param = "page_size"
def get_paginated_response(self, data):
"""
Annotate the response with pagination information.
"""
response = super(TeamAPIPagination, self).get_paginated_response(data)
# Add the current page to the response.
# It may make sense to eventually move this field into the default
# implementation, but for now, teams is the only API that uses this.
response.data["current_page"] = self.page.number
# This field can be derived from other fields in the response,
# so it may make sense to have the JavaScript client calculate it
# instead of including it in the response.
response.data["start"] = (self.page.number - 1) * self.get_page_size(self.request)
return response
class TopicsPagination(TeamAPIPagination):
"""Paginate topics. """
page_size = TOPICS_PER_PAGE
class MyTeamsPagination(TeamAPIPagination):
"""Paginate the user's teams. """
page_size = TEAM_MEMBERSHIPS_PER_PAGE
class TeamsDashboardView(GenericAPIView):
"""
View methods related to the teams dashboard.
"""
def get(self, request, course_id):
"""
Renders the teams dashboard, which is shown on the "Teams" tab.
Raises a 404 if the course specified by course_id does not exist, the
user is not registered for the course, or the teams feature is not enabled.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, "load", course_key)
if not is_feature_enabled(course):
raise Http404
if not CourseEnrollment.is_enrolled(request.user, course.id) and \
not has_access(request.user, 'staff', course, course.id):
raise Http404
user = request.user
# Even though sorting is done outside of the serializer, sort_order needs to be passed
# to the serializer so that the paginated results indicate how they were sorted.
sort_order = 'name'
topics = get_alphabetical_topics(course)
# Paginate and serialize topic data
# BulkTeamCountTopicSerializer will add team counts to the topics in a single
# bulk operation per page.
topics_data = self._serialize_and_paginate(
TopicsPagination,
topics,
request,
BulkTeamCountTopicSerializer,
{'course_id': course.id},
)
topics_data["sort_order"] = sort_order
user = request.user
user_teams = CourseTeam.objects.filter(membership__user=user, course_id=course.id)
user_teams_data = self._serialize_and_paginate(
MyTeamsPagination,
user_teams,
request,
CourseTeamSerializer,
{'expand': ('user',)}
)
context = {
"course": course,
"topics": topics_data,
# It is necessary to pass both privileged and staff because only privileged users can
# administer discussion threads, but both privileged and staff users are allowed to create
# multiple teams (since they are not automatically added to teams upon creation).
"user_info": {
"username": user.username,
"privileged": has_discussion_privileges(user, course_key),
"staff": bool(has_access(user, 'staff', course_key)),
"teams": user_teams_data
},
"topic_url": reverse(
'topics_detail', kwargs={'topic_id': 'topic_id', 'course_id': str(course_id)}, request=request
),
"topics_url": reverse('topics_list', request=request),
"teams_url": reverse('teams_list', request=request),
"teams_detail_url": reverse('teams_detail', args=['team_id']),
"team_memberships_url": reverse('team_membership_list', request=request),
"my_teams_url": reverse('teams_list', request=request),
"team_membership_detail_url": reverse('team_membership_detail', args=['team_id', user.username]),
"languages": [[lang[0], _(lang[1])] for lang in settings.ALL_LANGUAGES], # pylint: disable=translation-of-non-string
"countries": list(countries),
"disable_courseware_js": True,
"teams_base_url": reverse('teams_dashboard', request=request, kwargs={'course_id': course_id}),
}
return render_to_response("teams/teams.html", context)
def _serialize_and_paginate(self, pagination_cls, queryset, request, serializer_cls, serializer_ctx):
"""
Serialize and paginate objects in a queryset.
Arguments:
pagination_cls (pagination.Paginator class): Django Rest Framework Paginator subclass.
queryset (QuerySet): Django queryset to serialize/paginate.
request (HttpRequest): The incoming request, used for pagination and serializer context.
serializer_cls (serializers.Serializer class): Django Rest Framework Serializer subclass.
serializer_ctx (dict): Context dictionary to pass to the serializer.
Returns: dict
"""
# Django Rest Framework v3 requires that we pass the request
# into the serializer's context if the serializer contains
# hyperlink fields.
serializer_ctx["request"] = request
# Instantiate the paginator and use it to paginate the queryset
paginator = pagination_cls()
page = paginator.paginate_queryset(queryset, request)
# Serialize the page
serializer = serializer_cls(page, context=serializer_ctx, many=True)
# Use the paginator to construct the response data
# This will use the pagination subclass for the view to add additional
# fields to the response.
# For example, if the input data is a list, the output data would
# be a dictionary with keys "count", "next", "previous", and "results"
# (where "results" is set to the value of the original list)
return paginator.get_paginated_response(serializer.data).data
def has_team_api_access(user, course_key, access_username=None):
"""Returns True if the user has access to the Team API for the course
given by `course_key`. The user must either be enrolled in the course,
be course staff, be global staff, or have discussion privileges.
Args:
user (User): The user to check access for.
course_key (CourseKey): The key to the course which we are checking access to.
access_username (string): If provided, access_username must match user.username for non staff access.
Returns:
bool: True if the user has access, False otherwise.
"""
if user.is_staff:
return True
if CourseStaffRole(course_key).has_user(user):
return True
if has_discussion_privileges(user, course_key):
return True
if not access_username or access_username == user.username:
return CourseEnrollment.is_enrolled(user, course_key)
return False
class TeamsListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Get or create a course team.
**Example Requests**:
GET /api/team/v0/teams
POST /api/team/v0/teams
**Query Parameters for GET**
* course_id: Filters the result to teams belonging to the given
course. Required.
* topic_id: Filters the result to teams associated with the given
topic.
* text_search: Searches for full word matches on the name, description,
country, and language fields. NOTE: Search is on full names for countries
and languages, not the ISO codes. text_search cannot be requested along
with order_by.
* order_by: Cannot be used along with text_search. Must be one of the following:
* name: Orders results by case insensitive team name (default).
* open_slots: Orders results by most open slots (for tie-breaking,
last_activity_at is used, with most recent first).
* last_activity_at: Orders result by team activity, with most active first
(for tie-breaking, open_slots is used, with most open slots first).
* username: Return teams whose membership contains the given user.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of teams matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the teams matching the request.
* id: The team's unique identifier.
* discussion_topic_id: The unique id of the comments service
discussion topic associated with this team.
* name: The name of the team.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is associated
with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* last_activity_at: The date of the last activity of any team member
within the team.
* membership: A list of the users that are members of the team.
See membership endpoint for more detail.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course specified by course_id or
is not course or global staff, a 403 error is returned.
If the specified course_id is not valid or the user attempts to
use an unsupported query parameter, a 400 error is returned.
If the response does not exist, a 404 error is returned. For
example, the course_id may not reference a real course or the page
number may be beyond the last page.
If the server is unable to connect to Elasticsearch, and
the text_search parameter is supplied, a 503 error is returned.
**Response Values for POST**
Any logged in user who has verified their email address can create
a team. The format mirrors that of a GET for an individual team,
but does not include the id, date_created, or membership fields.
id is automatically computed based on name.
If the user is not logged in, a 401 error is returned.
If the user is not enrolled in the course, is not course or
global staff, or does not have discussion privileges a 403 error
is returned.
If the course_id is not valid or extra fields are included in the
request, a 400 error is returned.
If the specified course does not exist, a 404 error is returned.
"""
# OAuth2Authentication must come first to return a 401 for unauthenticated users
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = CourseTeamSerializer
pagination_class = TeamAPIPagination
def get(self, request):
"""GET /api/team/v0/teams/"""
result_filter = {}
if 'course_id' in request.query_params:
course_id_string = request.query_params['course_id']
try:
course_key = CourseKey.from_string(course_id_string)
# Ensure the course exists
course_module = modulestore().get_course(course_key)
if course_module is None:
return Response(status=status.HTTP_404_NOT_FOUND)
result_filter.update({'course_id': course_key})
except InvalidKeyError:
error = build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string,
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
if not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
else:
return Response(
build_api_error(ugettext_noop("course_id must be provided")),
status=status.HTTP_400_BAD_REQUEST
)
text_search = request.query_params.get('text_search', None)
if text_search and request.query_params.get('order_by', None):
return Response(
build_api_error(ugettext_noop("text_search and order_by cannot be provided together")),
status=status.HTTP_400_BAD_REQUEST
)
username = request.query_params.get('username', None)
if username is not None:
result_filter.update({'membership__user__username': username})
topic_id = request.query_params.get('topic_id', None)
if topic_id is not None:
if topic_id not in [topic['id'] for topic in course_module.teams_configuration['topics']]:
error = build_api_error(
ugettext_noop('The supplied topic id {topic_id} is not valid'),
topic_id=topic_id
)
return Response(error, status=status.HTTP_400_BAD_REQUEST)
result_filter.update({'topic_id': topic_id})
if text_search and CourseTeamIndexer.search_is_enabled():
try:
search_engine = CourseTeamIndexer.engine()
except ElasticSearchConnectionError:
return Response(
build_api_error(ugettext_noop('Error connecting to elasticsearch')),
status=status.HTTP_503_SERVICE_UNAVAILABLE
)
result_filter.update({'course_id': course_id_string})
search_results = search_engine.search(
query_string=text_search,
field_dictionary=result_filter,
size=MAXIMUM_SEARCH_SIZE,
)
paginated_results = paginate_search_results(
CourseTeam,
search_results,
self.paginator.get_page_size(request),
self.get_page()
)
emit_team_event('edx.team.searched', course_key, {
"number_of_results": search_results['total'],
"search_text": text_search,
"topic_id": topic_id,
})
page = self.paginate_queryset(paginated_results)
serializer = self.get_serializer(page, many=True)
order_by_input = None
else:
queryset = CourseTeam.objects.filter(**result_filter)
order_by_input = request.query_params.get('order_by', 'name')
if order_by_input == 'name':
# MySQL does case-insensitive order_by.
queryset = queryset.order_by('name')
elif order_by_input == 'open_slots':
queryset = queryset.order_by('team_size', '-last_activity_at')
elif order_by_input == 'last_activity_at':
queryset = queryset.order_by('-last_activity_at', 'team_size')
else:
return Response({
'developer_message': "unsupported order_by value {ordering}".format(ordering=order_by_input),
# Translators: 'ordering' is a string describing a way
# of ordering a list. For example, {ordering} may be
# 'name', indicating that the user wants to sort the
# list by lower case name.
'user_message': _(u"The ordering {ordering} is not supported").format(ordering=order_by_input),
}, status=status.HTTP_400_BAD_REQUEST)
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
response = self.get_paginated_response(serializer.data)
if order_by_input is not None:
response.data['sort_order'] = order_by_input
return response
def post(self, request):
"""POST /api/team/v0/teams/"""
field_errors = {}
course_key = None
course_id = request.data.get('course_id')
try:
course_key = CourseKey.from_string(course_id)
# Ensure the course exists
if not modulestore().has_course(course_key):
return Response(status=status.HTTP_404_NOT_FOUND)
except InvalidKeyError:
field_errors['course_id'] = build_api_error(
ugettext_noop('The supplied course_id {course_id} is not valid.'),
course_id=course_id
)
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
# Course and global staff, as well as discussion "privileged" users, will not automatically
# be added to a team when they create it. They are allowed to create multiple teams.
team_administrator = (has_access(request.user, 'staff', course_key)
or has_discussion_privileges(request.user, course_key))
if not team_administrator and CourseTeamMembership.user_in_team_for_course(request.user, course_key):
error_message = build_api_error(
ugettext_noop('You are already in a team in this course.'),
course_id=course_id
)
return Response(error_message, status=status.HTTP_400_BAD_REQUEST)
if course_key and not has_team_api_access(request.user, course_key):
return Response(status=status.HTTP_403_FORBIDDEN)
data = request.data.copy()
data['course_id'] = course_key
serializer = CourseTeamCreationSerializer(data=data)
add_serializer_errors(serializer, data, field_errors)
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
else:
team = serializer.save()
emit_team_event('edx.team.created', course_key, {
'team_id': team.team_id
})
if not team_administrator:
# Add the creating user to the team.
team.add_user(request.user)
emit_team_event(
'edx.team.learner_added',
course_key,
{
'team_id': team.team_id,
'user_id': request.user.id,
'add_method': 'added_on_create'
}
)
data = CourseTeamSerializer(team, context={"request": request}).data
return Response(data)
def get_page(self):
""" Returns page number specified in args, params, or defaults to 1. """
# This code is taken from within the GenericAPIView#paginate_queryset method.
# We need access to the page outside of that method for our paginate_search_results method
page_kwarg = self.kwargs.get(self.paginator.page_query_param)
page_query_param = self.request.query_params.get(self.paginator.page_query_param)
return page_kwarg or page_query_param or 1
class IsEnrolledOrIsStaff(permissions.BasePermission):
"""Permission that checks to see if the user is enrolled in the course or is staff."""
def has_object_permission(self, request, view, obj):
"""Returns true if the user is enrolled or is staff."""
return has_team_api_access(request.user, obj.course_id)
class IsStaffOrPrivilegedOrReadOnly(IsStaffOrReadOnly):
"""
Permission that checks to see if the user is global staff, course
staff, or has discussion privileges. If none of those conditions are
met, only read access will be granted.
"""
def has_object_permission(self, request, view, obj):
return (
has_discussion_privileges(request.user, obj.course_id) or
super(IsStaffOrPrivilegedOrReadOnly, self).has_object_permission(request, view, obj)
)
class TeamsDetailView(ExpandableFieldViewMixin, RetrievePatchAPIView):
"""
**Use Cases**
Get, update, or delete a course team's information. Updates are supported
only through merge patch.
**Example Requests**:
GET /api/team/v0/teams/{team_id}}
PATCH /api/team/v0/teams/{team_id} "application/merge-patch+json"
DELETE /api/team/v0/teams/{team_id}
**Query Parameters for GET**
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in, the response contains the following fields:
* id: The team's unique identifier.
* discussion_topic_id: The unique id of the comments service
discussion topic associated with this team.
* name: The name of the team.
* course_id: The identifier for the course this team belongs to.
* topic_id: Optionally specifies which topic the team is
associated with.
* date_created: Date and time when the team was created.
* description: A description of the team.
* country: Optionally specifies which country the team is
associated with.
* language: Optionally specifies which language the team is
associated with.
* membership: A list of the users that are members of the team. See
membership endpoint for more detail.
* last_activity_at: The date of the last activity of any team member
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in, a 401 error is returned.
If the user is not course or global staff, a 403 error is returned.
If the specified team does not exist, a 404 error is returned.
**Response Values for PATCH**
Only staff can patch teams.
If the user is anonymous or inactive, a 401 is returned.
If the user is logged in and the team does not exist, a 404 is returned.
If the user is not course or global staff, does not have discussion
privileges, and the team does exist, a 403 is returned.
If "application/merge-patch+json" is not the specified content type,
a 415 error is returned.
If the update could not be completed due to validation errors, this
method returns a 400 error with all error messages in the
"field_errors" field of the returned JSON.
**Response Values for DELETE**
Only staff can delete teams. When a team is deleted, all
team memberships associated with that team are also
deleted. Returns 204 on successful deletion.
If the user is anonymous or inactive, a 401 is returned.
If the user is not course or global staff and does not
have discussion privileges, a 403 is returned.
If the user is logged in and the team does not exist, a 404 is returned.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated, IsStaffOrPrivilegedOrReadOnly, IsEnrolledOrIsStaff,)
lookup_field = 'team_id'
serializer_class = CourseTeamSerializer
parser_classes = (MergePatchParser,)
def get_queryset(self):
"""Returns the queryset used to access the given team."""
return CourseTeam.objects.all()
def delete(self, request, team_id):
"""DELETE /api/team/v0/teams/{team_id}"""
team = get_object_or_404(CourseTeam, team_id=team_id)
self.check_object_permissions(request, team)
# Note: list() forces the queryset to be evaluated before delete()
memberships = list(CourseTeamMembership.get_memberships(team_id=team_id))
# Note: also deletes all team memberships associated with this team
team.delete()
log.info('user %d deleted team %s', request.user.id, team_id)
emit_team_event('edx.team.deleted', team.course_id, {
'team_id': team_id,
})
for member in memberships:
emit_team_event('edx.team.learner_removed', team.course_id, {
'team_id': team_id,
'remove_method': 'team_deleted',
'user_id': member.user_id
})
return Response(status=status.HTTP_204_NO_CONTENT)
class TopicListView(GenericAPIView):
"""
**Use Cases**
Retrieve a list of topics associated with a single course.
**Example Requests**
GET /api/team/v0/topics/?course_id={course_id}
**Query Parameters for GET**
* course_id: Filters the result to topics belonging to the given
course (required).
* order_by: Orders the results. Currently only 'name' and 'team_count' are supported;
the default value is 'name'. If 'team_count' is specified, topics are returned first sorted
by number of teams per topic (descending), with a secondary sort of 'name'.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
**Response Values for GET**
If the user is not logged in, a 401 error is returned.
If the course_id is not given or an unsupported value is passed for
order_by, returns a 400 error.
If the user is not logged in, is not enrolled in the course, or is
not course or global staff, returns a 403 error.
If the course does not exist, returns a 404 error.
Otherwise, a 200 response is returned containing the following
fields:
* count: The total number of topics matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the topics matching the request.
* id: The topic's unique identifier.
* name: The name of the topic.
* description: A description of the topic.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
pagination_class = TopicsPagination
def get(self, request):
"""GET /api/team/v0/topics/?course_id={course_id}"""
course_id_string = request.query_params.get('course_id', None)
if course_id_string is None:
return Response({
'field_errors': {
'course_id': build_api_error(
ugettext_noop("The supplied course id {course_id} is not valid."),
course_id=course_id_string
)
}
}, status=status.HTTP_400_BAD_REQUEST)
try:
course_id = CourseKey.from_string(course_id_string)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
# Ensure the course exists
course_module = modulestore().get_course(course_id)
if course_module is None: # course is None if not found
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
ordering = request.query_params.get('order_by', 'name')
if ordering not in ['name', 'team_count']:
return Response({
'developer_message': "unsupported order_by value {ordering}".format(ordering=ordering),
# Translators: 'ordering' is a string describing a way
# of ordering a list. For example, {ordering} may be
# 'name', indicating that the user wants to sort the
# list by lower case name.
'user_message': _(u"The ordering {ordering} is not supported").format(ordering=ordering),
}, status=status.HTTP_400_BAD_REQUEST)
# Always sort alphabetically, as it will be used as secondary sort
# in the case of "team_count".
topics = get_alphabetical_topics(course_module)
if ordering == 'team_count':
add_team_count(topics, course_id)
topics.sort(key=lambda t: t['team_count'], reverse=True)
page = self.paginate_queryset(topics)
serializer = TopicSerializer(
page,
context={'course_id': course_id},
many=True,
)
else:
page = self.paginate_queryset(topics)
# Use the serializer that adds team_count in a bulk operation per page.
serializer = BulkTeamCountTopicSerializer(page, context={'course_id': course_id}, many=True)
response = self.get_paginated_response(serializer.data)
response.data['sort_order'] = ordering
return response
def get_alphabetical_topics(course_module):
"""Return a list of team topics sorted alphabetically.
Arguments:
course_module (xmodule): the course which owns the team topics
Returns:
list: a list of sorted team topics
"""
return sorted(course_module.teams_topics, key=lambda t: t['name'].lower())
class TopicDetailView(APIView):
"""
**Use Cases**
Retrieve a single topic from a course.
**Example Requests**
GET /api/team/v0/topics/{topic_id},{course_id}
**Query Parameters for GET**
* topic_id: The ID of the topic to retrieve (required).
* course_id: The ID of the course to retrieve the topic from
(required).
**Response Values for GET**
If the user is not logged in, a 401 error is returned.
If the topic_id or course_id is not given or an unsupported value is
passed for order_by, returns a 400 error.
If the user is not enrolled in the course, or is not course or
global staff, returns a 403 error.
If the course does not exist, returns a 404 error.
Otherwise, a 200 response is returned containing the following fields:
* id: The topic's unique identifier.
* name: The name of the topic.
* description: A description of the topic.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, topic_id, course_id):
"""GET /api/team/v0/topics/{topic_id},{course_id}/"""
try:
course_id = CourseKey.from_string(course_id)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
# Ensure the course exists
course_module = modulestore().get_course(course_id)
if course_module is None:
return Response(status=status.HTTP_404_NOT_FOUND)
if not has_team_api_access(request.user, course_id):
return Response(status=status.HTTP_403_FORBIDDEN)
topics = [t for t in course_module.teams_topics if t['id'] == topic_id]
if len(topics) == 0:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = TopicSerializer(topics[0], context={'course_id': course_id})
return Response(serializer.data)
class MembershipListView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
List course team memberships or add a user to a course team.
**Example Requests**:
GET /api/team/v0/team_membership
POST /api/team/v0/team_membership
**Query Parameters for GET**
At least one of username and team_id must be provided.
* username: Returns membership records only for the specified user.
If the requesting user is not staff then only memberships for
teams associated with courses in which the requesting user is
enrolled are returned.
* team_id: Returns only membership records associated with the
specified team. The requesting user must be staff or enrolled in
the course associated with the team.
* course_id: Returns membership records only for the specified
course. Username must have access to this course, or else team_id
must be in this course.
* page_size: Number of results to return per page.
* page: Page number to retrieve.
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, the response contains:
* count: The total number of memberships matching the request.
* next: The URL to the next page of results, or null if this is the
last page.
* previous: The URL to the previous page of results, or null if this
is the first page.
* num_pages: The total number of pages in the result.
* results: A list of the memberships matching the request.
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of the user
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
If neither team_id nor username are provided, a 400 error is
returned.
If team_id is provided but the team does not exist, a 404 error is
returned.
If the specified course_id is invalid, a 404 error is returned.
This endpoint uses 404 error codes to avoid leaking information
about team or user existence. Specifically, a 404 error will be
returned if a logged in user specifies a team_id for a course
they are not enrolled in.
Additionally, when username is specified the list of returned
memberships will be filtered to memberships in teams associated
with courses that the requesting user is enrolled in.
If the course specified by course_id does not contain the team
specified by team_id, a 400 error is returned.
If the user is not enrolled in the course specified by course_id,
and does not have staff access to the course, a 400 error is
returned.
**Response Values for POST**
Any logged in user enrolled in a course can enroll themselves in a
team in the course. Course staff, global staff, and discussion
privileged users can enroll any user in a team, with a few
exceptions noted below.
If the user is not logged in and active, a 401 error is returned.
If username and team_id are not provided in the posted JSON, a 400
error is returned describing the missing fields.
If the specified team does not exist, a 404 error is returned.
If the user is not staff, does not have discussion privileges,
and is not enrolled in the course associated with the team they
are trying to join, or if they are trying to add a user other
than themselves to a team, a 404 error is returned. This is to
prevent leaking information about the existence of teams and users.
If the specified user does not exist, a 404 error is returned.
If the user is already a member of a team in the course associated
with the team they are trying to join, a 400 error is returned.
This applies to both staff and students.
If the user is not enrolled in the course associated with the team
they are trying to join, a 400 error is returned. This can occur
when a staff or discussion privileged user posts a request adding
another user to a team.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
def get(self, request):
"""GET /api/team/v0/team_membership"""
specified_username_or_team = False
username = None
team_id = None
requested_course_id = None
requested_course_key = None
accessible_course_ids = None
if 'course_id' in request.query_params:
requested_course_id = request.query_params['course_id']
try:
requested_course_key = CourseKey.from_string(requested_course_id)
except InvalidKeyError:
return Response(status=status.HTTP_404_NOT_FOUND)
if 'team_id' in request.query_params:
specified_username_or_team = True
team_id = request.query_params['team_id']
try:
team = CourseTeam.objects.get(team_id=team_id)
except CourseTeam.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if requested_course_key is not None and requested_course_key != team.course_id:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not has_team_api_access(request.user, team.course_id):
return Response(status=status.HTTP_404_NOT_FOUND)
if 'username' in request.query_params:
specified_username_or_team = True
username = request.query_params['username']
if not request.user.is_staff:
enrolled_courses = (
CourseEnrollment.enrollments_for_user(request.user).values_list('course_id', flat=True)
)
staff_courses = (
CourseAccessRole.objects.filter(user=request.user, role='staff').values_list('course_id', flat=True)
)
accessible_course_ids = [item for sublist in (enrolled_courses, staff_courses) for item in sublist]
if requested_course_id is not None and requested_course_id not in accessible_course_ids:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not specified_username_or_team:
return Response(
build_api_error(ugettext_noop("username or team_id must be specified.")),
status=status.HTTP_400_BAD_REQUEST
)
course_keys = None
if requested_course_key is not None:
course_keys = [requested_course_key]
elif accessible_course_ids is not None:
course_keys = [CourseKey.from_string(course_string) for course_string in accessible_course_ids]
queryset = CourseTeamMembership.get_memberships(username, course_keys, team_id)
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
def post(self, request):
"""POST /api/team/v0/team_membership"""
field_errors = {}
if 'username' not in request.data:
field_errors['username'] = build_api_error(ugettext_noop("Username is required."))
if 'team_id' not in request.data:
field_errors['team_id'] = build_api_error(ugettext_noop("Team id is required."))
if field_errors:
return Response({
'field_errors': field_errors,
}, status=status.HTTP_400_BAD_REQUEST)
try:
team = CourseTeam.objects.get(team_id=request.data['team_id'])
except CourseTeam.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
username = request.data['username']
if not has_team_api_access(request.user, team.course_id, access_username=username):
return Response(status=status.HTTP_404_NOT_FOUND)
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
course_module = modulestore().get_course(team.course_id)
if course_module.teams_max_size is not None and team.users.count() >= course_module.teams_max_size:
return Response(
build_api_error(ugettext_noop("This team is already full.")),
status=status.HTTP_400_BAD_REQUEST
)
try:
membership = team.add_user(user)
emit_team_event(
'edx.team.learner_added',
team.course_id,
{
'team_id': team.team_id,
'user_id': user.id,
'add_method': 'joined_from_team_view' if user == request.user else 'added_by_another_user'
}
)
except AlreadyOnTeamInCourse:
return Response(
build_api_error(
ugettext_noop("The user {username} is already a member of a team in this course."),
username=username
),
status=status.HTTP_400_BAD_REQUEST
)
except NotEnrolledInCourseForTeam:
return Response(
build_api_error(
ugettext_noop("The user {username} is not enrolled in the course associated with this team."),
username=username
),
status=status.HTTP_400_BAD_REQUEST
)
serializer = self.get_serializer(instance=membership)
return Response(serializer.data)
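# --- Illustrative sketch, not part of the original module ---
# Mirrors the GET and POST contracts of MembershipListView above. The session
# object and base_url are placeholder assumptions (e.g. an authenticated
# requests.Session and the LMS host); they are not defined in this module.
def _example_membership_calls(session, base_url, username, team_id):
    """Hypothetical client helpers for /api/team/v0/team_membership."""
    # GET: at least one of username/team_id is required, otherwise 400.
    listing = session.get(
        base_url + '/api/team/v0/team_membership',
        params={'username': username, 'expand': 'user,team', 'page_size': 10},
    )
    # POST: both fields are required; missing ones come back as field_errors.
    joined = session.post(
        base_url + '/api/team/v0/team_membership',
        json={'username': username, 'team_id': team_id},
    )
    # joined: 200 -> serialized membership; 400 -> full team, duplicate
    # membership, or not enrolled; 404 -> unknown team/user or no API access.
    return listing, joined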
class MembershipDetailView(ExpandableFieldViewMixin, GenericAPIView):
"""
**Use Cases**
Gets individual course team memberships or removes a user from a course team.
**Example Requests**:
GET /api/team/v0/team_membership/{team_id},{username}
DELETE /api/team/v0/team_membership/{team_id},{username}
**Query Parameters for GET**
* expand: Comma separated list of types for which to return
expanded representations. Supports "user" and "team".
**Response Values for GET**
If the user is logged in and enrolled, or is course or global staff
the response contains:
* user: The user associated with the membership. This field may
contain an expanded or collapsed representation.
* team: The team associated with the membership. This field may
contain an expanded or collapsed representation.
* date_joined: The date and time the membership was created.
* last_activity_at: The date of the last activity of any team member
within the team.
For all text fields, clients rendering the values should take care
to HTML escape them to avoid script injections, as the data is
stored exactly as specified. The intention is that plain text is
supported, not HTML.
If the user is not logged in and active, a 401 error is returned.
If the specified team does not exist, a 404 error is returned.
If the user is logged in but is not enrolled in the course
associated with the specified team, or is not staff, a 404 error is
returned. This avoids leaking information about course or team
existence.
If the membership does not exist, a 404 error is returned.
**Response Values for DELETE**
Any logged in user enrolled in a course can remove themselves from
a team in the course. Course staff, global staff, and discussion
privileged users can remove any user from a team. Successfully
deleting a membership will return a 204 response with no content.
If the user is not logged in and active, a 401 error is returned.
If the specified team or username does not exist, a 404 error is
returned.
If the user is not staff or a discussion privileged user and is
attempting to remove another user from a team, a 404 error is
returned. This prevents leaking information about team and user
existence.
If the membership does not exist, a 404 error is returned.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication)
permission_classes = (permissions.IsAuthenticated,)
serializer_class = MembershipSerializer
def get_team(self, team_id):
"""Returns the team with team_id, or throws Http404 if it does not exist."""
try:
return CourseTeam.objects.get(team_id=team_id)
except CourseTeam.DoesNotExist:
raise Http404
def get_membership(self, username, team):
"""Returns the membership for the given user and team, or throws Http404 if it does not exist."""
try:
return CourseTeamMembership.objects.get(user__username=username, team=team)
except CourseTeamMembership.DoesNotExist:
raise Http404
def get(self, request, team_id, username):
"""GET /api/team/v0/team_membership/{team_id},{username}"""
team = self.get_team(team_id)
if not has_team_api_access(request.user, team.course_id):
return Response(status=status.HTTP_404_NOT_FOUND)
membership = self.get_membership(username, team)
serializer = self.get_serializer(instance=membership)
return Response(serializer.data)
def delete(self, request, team_id, username):
"""DELETE /api/team/v0/team_membership/{team_id},{username}"""
team = self.get_team(team_id)
if has_team_api_access(request.user, team.course_id, access_username=username):
membership = self.get_membership(username, team)
removal_method = 'self_removal'
if 'admin' in request.query_params:
removal_method = 'removed_by_admin'
membership.delete()
emit_team_event(
'edx.team.learner_removed',
team.course_id,
{
'team_id': team.team_id,
'user_id': membership.user.id,
'remove_method': removal_method
}
)
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_404_NOT_FOUND)
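# --- Illustrative sketch, not part of the original module ---
# Exercises the detail endpoints above: fetch a membership, then remove it.
# The session object and base_url are placeholder assumptions; the 'admin'
# query parameter only changes the emitted removal_method, as the DELETE
# handler above shows.
def _example_leave_team(session, base_url, team_id, username):
    """Hypothetical client helper for /api/team/v0/team_membership/{team_id},{username}."""
    detail_url = '{}/api/team/v0/team_membership/{},{}'.format(base_url, team_id, username)
    membership = session.get(detail_url, params={'expand': 'team'})  # 200 or 404
    deleted = session.delete(detail_url)  # 204 on success, 404 otherwise
    return membership, deleted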
| agpl-3.0 |
Scratchcat1/AATC | AATC_Crypto.py | 1 | 9696 | #AATC crypto module
import codecs,recvall,ast,os,AATC_Config,AATC_CryptoBeta
from Crypto.Cipher import AES,PKCS1_OAEP
from Crypto.PublicKey import RSA
class Crypter:
"""
A class to encrypt and decrypt a connection with AES. Takes a connection object and mode and sets up a secret key for both participants.
This key is then used to create an AES object which encrypts and decrypts messages via the Encrypt and Decrypt functions.
These strings must be in binary format. Strings will be padded so that their length is a multiple of 16.
The object will communicate with another Crypter object via the socket and pass public keys.
Best used for exchanging string representations of Python objects, converting the result back with 'ast', as this also strips the padding whitespace.
If in 'CLIENT' mode the Crypter will start communication. It will send a message ('PublicKey', (Client_Public_Key,)) and the server will return the server's public key.
The CLIENT will then send an 'Exit' message to end communication.
By default the Crypter will generate the keys when __init__ is called. This can be disabled by creating the object with AutoGenerate set to False.
Mode can be changed via SetMode. Normal options are 'CLIENT' and 'SERVER'.
"""
def __init__(self, con, mode = "CLIENT",AutoGenerate = True):
self._con = con
self.SetMode(mode)
if AutoGenerate:
self.GenerateKey()
def SetMode(self,mode):
self._mode = mode
def GenerateKey(self,key_size = AATC_Config.DEFAULT_RSA_KEYSIZE):
print("Generating encryption keys. Please stand by...") #Generating keys takes a long time and have found no way to shorted key length
if self._mode == "SERVER":
self.ServerGenerateKey()
elif self._mode == "CLIENT":
self.ClientGenerateKey(key_size)
else:
raise ValueError("Crypter: Incorrect mode set")
print("Encryption keys generated",self._AESKey)
def ClientGenerateKey(self,RSA_KeySize,AES_KeySize= AATC_Config.DEFAULT_AES_KEYSIZE): #Start the relevant key exchange system
if AATC_Config.SET_ENCRYPTION_KEYS_ENABLE: #Allows preset encryption keys to be used
self.SetEncryptionKeys(AATC_Config.SET_AES_KEY, AATC_Config.SET_IV_KEY)
elif AATC_Config.ENCRYPTION_USE_PRESHARED_KEYS: #Uses preshared certificates
self.ClientPreSharedKeys(RSA_KeySize,AES_KeySize)
else:
self.ClientExchangeKeys(RSA_KeySize,AES_KeySize) #Exchange AES keys using RSA
self.Send(("Exit",()))
Sucess,Message,Data = self.SplitData(self.Recv())
if not Sucess:
raise Exception("Server failed to exit"+Message)
def ClientPreSharedKeys(self,RSA_KeySize,AES_KeySize): #Swap keys using certificate authentication
self.Send(("GetServerCertificateChain",()))
Sucess,Message,CertificateChain = self.SplitData(self.Recv())
if not Sucess:
raise Exception("Server did not respond to command")
if AES_KeySize not in AATC_Config.ALLOWED_AES_KEYSIZES:
raise Exception("AES key size not in ALLOWED_AES_KEYSIZES. Change keysize to an allowed value") #Only accept allowed key sizes
AESKey,IV = GenerateKeys(AES_KeySize)
PublicKey = AATC_CryptoBeta.VerifyCertificates(CertificateChain,AATC_Config.ROOT_CERTIFICATES,self._con) #Verify the certificate chain is valid.
if PublicKey: #If the chain is valid
PKO = PKCS1_OAEP.new(RSA.import_key(PublicKey))
EncryptedAESKey = PKO.encrypt(AESKey)
EncryptedIV = PKO.encrypt(IV)
self.SetEncryptionKeys(AESKey,IV)
self.Send(("SetKey",(EncryptedAESKey,EncryptedIV))) #Swap keys encrypted using server public key
Sucess,Message,Data = self.SplitData(self.Recv())
if not Sucess:
raise Exception("Server rejected setting AES_Keys"+Message)
else:
print("Certificate Chain is not valid")
if AATC_Config.AUTO_GENERATE_FALLBACK:
self.ClientExchangeKeys(RSA_KeySize,AES_KeySize) #Fall back on key exchange if allowed
else:
raise Exception("Certificate Chain is not valid. Exception raised") #Raise exception if not allowed
def ClientExchangeKeys(self,RSA_KeySize,AES_KeySize): #Exchange the keys using RSA encryption
RSAKey = RSA.generate(RSA_KeySize)
privateKey = RSAKey.exportKey("DER")
publicKey = RSAKey.publickey().exportKey("DER") #Get the RSA keys.
self.Send(("GenerateKey",(publicKey,AES_KeySize)))
Sucess,Message,data = self.SplitData(self.Recv())
if not Sucess:
raise Exception("Error occurred while exchanging keys: " + Message)
RSAPrivateKey = RSA.import_key(privateKey)
RSAPrivateObject = PKCS1_OAEP.new(RSAPrivateKey)
AESKey = RSAPrivateObject.decrypt(data[0]) #Decrypt the received keys
IV = RSAPrivateObject.decrypt(data[1])
self.SetEncryptionKeys(AESKey,IV)
################################################################
def ServerGenerateKey(self): #Server select the correct mode.
if AATC_Config.SET_ENCRYPTION_KEYS_ENABLE:
self.SetEncryptionKeys(AATC_Config.SET_AES_KEY, AATC_Config.SET_IV_KEY) #Use preset keys if enabled
self._Exit = False
while not self._Exit: #Start a server type loop (responds to commands from client)
data = self.Recv()
Command, Arguments = data[0],data[1]
#Respond to relevant command
if Command == "GenerateKey":
Sucess,Message,Data = self.ServerGenerateKeys(Arguments)
elif Command == "GetServerCertificateChain":
Sucess,Message,Data = self.GetServerCertificateChain(Arguments)
elif Command == "SetKey":
Sucess,Message,Data = self.ServerSetKey(Arguments)
elif Command == "Exit":
Sucess,Message,Data = True,"Exiting",[]
self._Exit = True
else:
Sucess,Message,Data = False,"Command does not exist",[]
self.Send((Sucess,Message,Data))
if not hasattr(self,"_AESKey"): #Only set if sucessfully setup.
raise Exception("Failure during crypter setup")
def ServerGenerateKeys(self,Arguments): #Generate keys and encrypt with the provided RSA key
publicKey,AES_KeySize = Arguments[0],Arguments[1]
if AES_KeySize not in AATC_Config.ALLOWED_AES_KEYSIZES:
AES_KeySize = AATC_Config.DEFAULT_AES_KEYSIZE #If key size is not valid set size to default of AATC_Config.DEFAULT_AES_KEYSIZE
AESKey,IV = GenerateKeys(AES_KeySize)
PublicKeyObject = PKCS1_OAEP.new( RSA.import_key(publicKey))
EncryptedAESKey = PublicKeyObject.encrypt(AESKey)
EncryptedIV = PublicKeyObject.encrypt(IV) #Encrypt AES keys
self.SetEncryptionKeys(AESKey,IV)
return True,"Instated encryption keys",[EncryptedAESKey,EncryptedIV] #Return values to be sent
def GetServerCertificateChain(self,Arguments = None): #Respond to request to get certificate chain.
return True,"Server Certificate Chain",AATC_Config.SERVER_CERTIFICATE_CHAIN
def ServerSetKey(self,Arguments): #Set provided keys encrypted with public key of server
PKO = PKCS1_OAEP.new(RSA.import_key(AATC_Config.SERVER_PRIVATE_KEY))
AESKey,IV = Arguments[0],Arguments[1]
AESKey,IV = PKO.decrypt(AESKey),PKO.decrypt(IV) #Decrypt keys
if len(AESKey) in AATC_Config.ALLOWED_AES_KEYSIZES:
self.SetEncryptionKeys(AESKey,IV)
return True,"Keys set",[]
else:
#self._Exit = True
return False,"AES key size not in ALLOWED_AES_KEYSIZES:"+str(AATC_Config.ALLOWED_AES_KEYSIZES),[]
###############################################
def SetEncryptionKeys(self,AESKey,IV): #Set the encryption keys and AES encryption objects
self._AESKey = AESKey
self._IV = IV
self._EncryptAES = AES.new(self._AESKey,AES.MODE_GCM,self._IV) #Two separate instances to encrypt and decrypt, as non-ECB AES acts as a stream cipher
self._DecryptAES = AES.new(self._AESKey,AES.MODE_GCM,self._IV) #Errors will occur if the encrypt and decrypt call counts are not kept equal.
##############################################
def Encrypt(self,data):
return self._EncryptAES.encrypt(data)
def Decrypt(self,data):
return self._DecryptAES.decrypt(data)
def Send(self,data):
self._con.sendall(codecs.encode(str(data)))
def Recv(self):
data = recvall.recvall(self._con)
data = ast.literal_eval(codecs.decode(data))
return data
def SplitData(self,data):
return data[0],data[1],data[2]
def GenerateKeys(AES_KeySize): #Creates random keys using a cryptographically secure random source
AESKey = os.urandom(AES_KeySize) # Here to allow regeneration of AES key while still in loop if required.
IV = os.urandom(AES_KeySize)
return AESKey,IV
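# --- Illustrative sketch, not part of the original module ---
# Demonstrates the symmetric layer without any networking or key exchange:
# two Crypter objects that share the same AES key/IV can read each other's
# output, provided their encrypt/decrypt call counts stay in step (GCM acts
# as a stream cipher here). AutoGenerate=False skips the socket handshake,
# so passing None as the connection is safe in this sketch.
def _example_symmetric_roundtrip():
    alice = Crypter(None, mode="CLIENT", AutoGenerate=False)
    bob = Crypter(None, mode="SERVER", AutoGenerate=False)
    AESKey, IV = GenerateKeys(16)                     # 16-byte AES key and nonce
    alice.SetEncryptionKeys(AESKey, IV)
    bob.SetEncryptionKeys(AESKey, IV)
    ciphertext = alice.Encrypt(b"sixteen byte msg")   # length is a multiple of 16
    assert bob.Decrypt(ciphertext) == b"sixteen byte msg"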
| gpl-3.0 |
dimitri-justeau/niamoto-core | tests/data_providers/test_base_plot_provider.py | 2 | 14591 | # coding: utf-8
import unittest
from geoalchemy2.shape import from_shape, WKTElement
from shapely.geometry import Point
from niamoto.testing import set_test_path
set_test_path()
from niamoto.conf import settings
from niamoto.data_providers.base_plot_provider import *
from niamoto.db import metadata as niamoto_db_meta
from niamoto.db.connector import Connector
from niamoto.db.utils import fix_db_sequences
from niamoto.testing.base_tests import BaseTestNiamotoSchemaCreated
from niamoto.testing.test_data_provider import TestDataProvider
from niamoto.testing.test_database_manager import TestDatabaseManager
from niamoto.testing import test_data
class TestBasePlotProvider(BaseTestNiamotoSchemaCreated):
"""
Test case for base plot provider.
"""
@classmethod
def setUpClass(cls):
super(TestBasePlotProvider, cls).setUpClass()
data_provider_1 = TestDataProvider.register_data_provider(
'test_data_provider_1',
)
data_provider_2 = TestDataProvider.register_data_provider(
'test_data_provider_2',
)
TestDataProvider.register_data_provider('test_data_provider_3')
plot_1 = test_data.get_plot_data_1(data_provider_1)
plot_2 = test_data.get_plot_data_2(data_provider_2)
ins = niamoto_db_meta.plot.insert().values(plot_1 + plot_2)
with Connector.get_connection() as connection:
connection.execute(ins)
fix_db_sequences()
def test_get_current_plot_data(self):
"""
Test for the get_current_plot_data method.
Test the structure of the returned DataFrame.
Test retrieving an empty DataFrame.
Test retrieving a non-empty DataFrame.
"""
data_provider_1 = TestDataProvider('test_data_provider_1')
data_provider_2 = TestDataProvider('test_data_provider_2')
data_provider_3 = TestDataProvider('test_data_provider_3')
with Connector.get_connection() as connection:
pp1 = BasePlotProvider(data_provider_1)
pp2 = BasePlotProvider(data_provider_2)
pp3 = BasePlotProvider(data_provider_3)
df1 = pp1.get_niamoto_plot_dataframe(connection)
df2 = pp2.get_niamoto_plot_dataframe(connection)
df3 = pp3.get_niamoto_plot_dataframe(connection)
self.assertEqual(len(df1), 4)
self.assertEqual(len(df2), 2)
self.assertEqual(len(df3), 0)
# 2. Check the structure of the DataFrame
df_cols = list(df1.columns) + [df1.index.name, ]
db_cols = niamoto_db_meta.plot.columns
for db_col in db_cols:
self.assertIn(db_col.name, df_cols)
def test_get_insert_dataframe(self):
data_provider_1 = TestDataProvider('test_data_provider_1')
with Connector.get_connection() as connection:
pp1 = BasePlotProvider(data_provider_1)
df1 = pp1.get_niamoto_plot_dataframe(connection)
# 1. Nothing to insert
plot_1 = pd.DataFrame.from_records([
{
'id': 0,
'name': 'plot_1_1',
'location': from_shape(Point(166.5521, -22.0939), srid=4326),
'properties': '{}',
},
], index='id')
ins = pp1.get_insert_dataframe(df1, plot_1)
self.assertEqual(len(ins), 0)
# 2. Everything to insert
plot_2 = pd.DataFrame.from_records([
{
'id': 10,
'name': 'plot_1_11',
'location': from_shape(Point(166.5521, -22.0939), srid=4326),
'properties': '{}',
},
{
'id': 11,
'name': 'plot_1_12',
'location': from_shape(Point(166.551, -22.098), srid=4326),
'properties': '{}',
},
], index='id')
ins = pp1.get_insert_dataframe(df1, plot_2)
self.assertIn('provider_pk', ins.columns)
self.assertIn('provider_id', ins.columns)
self.assertEqual(len(ins[pd.isnull(ins['provider_pk'])]), 0)
self.assertEqual(len(ins[pd.isnull(ins['provider_id'])]), 0)
self.assertEqual(len(ins), 2)
# 3. Partial insert
plot_3 = pd.DataFrame.from_records([
{
'id': 0,
'name': 'plot_1_1',
'location': from_shape(Point(166.5521, -22.0939), srid=4326),
'properties': '{}',
},
{
'id': 11,
'name': 'plot_1_12',
'location': from_shape(Point(166.551, -22.098), srid=4326),
'properties': '{}',
},
], index='id')
ins = pp1.get_insert_dataframe(df1, plot_3)
self.assertEqual(len(ins), 1)
def test_get_update_dataframe(self):
data_provider_1 = TestDataProvider('test_data_provider_1')
with Connector.get_connection() as connection:
pp1 = BasePlotProvider(data_provider_1)
df1 = pp1.get_niamoto_plot_dataframe(connection)
# 1. Nothing to update
plot_1 = pd.DataFrame.from_records([
{
'id': 10,
'name': 'plot_1_11',
'location': from_shape(Point(166.5521, -22.0939), srid=4326),
'properties': '{}',
},
], index='id')
update_df = pp1.get_update_dataframe(df1, plot_1)
self.assertIn('provider_pk', update_df.columns)
self.assertIn('provider_id', update_df.columns)
self.assertEqual(
len(update_df[pd.isnull(update_df['provider_pk'])]),
0
)
self.assertEqual(
len(update_df[pd.isnull(update_df['provider_id'])]),
0
)
self.assertEqual(len(update_df), 0)
# 2. Everything to update
plot_2 = pd.DataFrame.from_records([
{
'id': 0,
'name': 'plot_1_a',
'location': from_shape(Point(166.5521, -22.0939), srid=4326),
'properties': '{}',
},
{
'id': 1,
'name': 'plot_1_b',
'location': from_shape(Point(166.551, -22.098), srid=4326),
'properties': '{}',
},
{
'id': 2,
'name': 'plot_1_c',
'location': from_shape(Point(166.552, -22.097), srid=4326),
'properties': '{}',
},
{
'id': 5,
'name': 'plot_1_d',
'location': from_shape(Point(166.553, -22.099), srid=4326),
'properties': '{}',
},
], index='id')
update_df = pp1.get_update_dataframe(df1, plot_2)
self.assertEqual(len(update_df), 4)
# 3. Partial update
plot_3 = pd.DataFrame.from_records([
{
'id': 0,
'name': 'plot_1_z',
'location': from_shape(Point(166.5521, -22.0939), srid=4326),
'properties': '{}',
},
{
'id': 1,
'name': 'plot_1_zzz',
'location': from_shape(Point(166.551, -22.098), srid=4326),
'properties': '{}',
},
], index='id')
update_df = pp1.get_update_dataframe(df1, plot_3)
self.assertEqual(len(update_df), 2)
def test_get_delete_dataframe(self):
data_provider_1 = TestDataProvider('test_data_provider_1')
with Connector.get_connection() as connection:
pp1 = BasePlotProvider(data_provider_1)
df1 = pp1.get_niamoto_plot_dataframe(connection)
# 1. Nothing to delete
plot_1 = pd.DataFrame.from_records([
{
'id': 0,
'name': 'plot_1_1',
'location': from_shape(Point(166.5521, -22.0939), srid=4326),
'properties': '{}',
},
{
'id': 1,
'name': 'plot_1_2',
'location': from_shape(Point(166.551, -22.098), srid=4326),
'properties': '{}',
},
{
'id': 2,
'location': from_shape(Point(166.552, -22.097), srid=4326),
'properties': '{}',
},
{
'id': 5,
'location': from_shape(Point(166.553, -22.099), srid=4326),
'properties': '{}',
},
], index='id')
delete_df = pp1.get_delete_dataframe(df1, plot_1)
self.assertIn('provider_pk', delete_df.columns)
self.assertIn('provider_id', delete_df.columns)
self.assertEqual(
len(delete_df[pd.isnull(delete_df['provider_pk'])]),
0
)
self.assertEqual(
len(delete_df[pd.isnull(delete_df['provider_id'])]),
0
)
self.assertEqual(len(delete_df), 0)
self.assertEqual(len(delete_df[pd.isnull(delete_df['location'])]), 0)
# 2. Everything to delete
plot_2 = pd.DataFrame.from_records([
{
'id': 10,
'name': 'plot_1_11',
'location': from_shape(Point(166.5521, -22.0939), srid=4326),
'properties': '{}',
},
], index='id')
delete_df = pp1.get_delete_dataframe(df1, plot_2)
self.assertEqual(len(delete_df), 4)
self.assertEqual(len(delete_df[pd.isnull(delete_df['location'])]), 0)
# 3. Partial delete
plot_3 = pd.DataFrame.from_records([
{
'id': 0,
'name': 'plot_1_1',
'location': from_shape(Point(166.5521, -22.0939), srid=4326),
'properties': '{}',
},
], index='id')
delete_df = pp1.get_delete_dataframe(df1, plot_3)
self.assertEqual(len(delete_df), 3)
self.assertEqual(len(delete_df[pd.isnull(delete_df['location'])]), 0)
def test_sync_insert(self):
self.tearDownClass()
self.setUpClass()
data_provider_3 = TestDataProvider('test_data_provider_3')
with Connector.get_connection() as connection:
pp3 = BasePlotProvider(data_provider_3)
self.assertEqual(
len(pp3.get_niamoto_plot_dataframe(connection)),
0
)
pl = pd.DataFrame.from_records([
{
'id': 0,
'name': 'plot_3_1',
'location': from_shape(
Point(166.5521, -22.0939),
srid=4326
),
'properties': '{}',
},
{
'id': 1,
'name': 'plot_3_2',
'location': from_shape(
Point(166.551, -22.098),
srid=4326
),
'properties': '{}',
},
], index='id')
i, u, d = pp3._sync(pl, connection)
self.assertEqual(len(i), 2)
self.assertEqual(len(u), 0)
self.assertEqual(len(d), 0)
self.assertEqual(
len(pp3.get_niamoto_plot_dataframe(connection)),
2
)
def test_sync_update(self):
self.tearDownClass()
self.setUpClass()
data_provider_1 = TestDataProvider('test_data_provider_1')
with Connector.get_connection() as connection:
pp1 = BasePlotProvider(data_provider_1)
self.assertEqual(
len(pp1.get_niamoto_plot_dataframe(connection)),
4
)
pl = pd.DataFrame.from_records([
{
'id': 0,
'name': "plot_1",
'location': WKTElement(
Point(166.5521, -22.0939).wkt,
srid=4326
),
'properties': '{}',
},
{
'id': 1,
'name': 'plot_b',
'location': from_shape(Point(166.551, -22.098), srid=4326),
'properties': '{}',
},
{
'id': 2,
'name': 'plot_c',
'location': from_shape(Point(166.552, -22.097), srid=4326),
'properties': '{}',
},
{
'id': 5,
'name': 'plot_d',
'location': WKTElement(
Point(166.553, -22.099),
srid=4326
),
'properties': '{}',
},
], index='id')
i, u, d = pp1._sync(pl, connection)
self.assertEqual(len(i), 0)
self.assertEqual(len(u), 4)
self.assertEqual(len(d), 0)
self.assertEqual(
len(pp1.get_niamoto_plot_dataframe(connection)),
4
)
def test_sync_delete(self):
self.tearDownClass()
self.setUpClass()
data_provider_1 = TestDataProvider('test_data_provider_1')
with Connector.get_connection() as connection:
pp1 = BasePlotProvider(data_provider_1)
self.assertEqual(
len(pp1.get_niamoto_plot_dataframe(connection)),
4
)
occ = pd.DataFrame.from_records(
[],
index='id',
columns=('id', 'location')
)
i, u, d = pp1._sync(occ, connection)
self.assertEqual(len(i), 0)
self.assertEqual(len(u), 0)
self.assertEqual(len(d), 4)
self.assertEqual(
len(pp1.get_niamoto_plot_dataframe(connection)),
0
)
if __name__ == '__main__':
TestDatabaseManager.setup_test_database()
TestDatabaseManager.create_schema(settings.NIAMOTO_SCHEMA)
TestDatabaseManager.create_schema(settings.NIAMOTO_RASTER_SCHEMA)
TestDatabaseManager.create_schema(settings.NIAMOTO_VECTOR_SCHEMA)
unittest.main(exit=False)
TestDatabaseManager.teardown_test_database()
| gpl-3.0 |
Jimmy-Morzaria/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 13 | 43295 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
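# incremental (Polyak-Ruppert) average: after processing sample i (0-based),
# average_weights holds the mean of all weight vectors seen so far; the
# multiply/add/divide triple below updates that running mean in place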
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_auto(self):
# partial_fit with class_weight='auto' not supported
assert_raises_regexp(ValueError,
"class_weight 'auto' is not supported for "
"partial_fit. In order to use 'auto' weights, "
"use compute_class_weight\('auto', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='auto').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_auto_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001, n_iter=1000,
class_weight="auto", shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
# build a very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000, class_weight="auto", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="auto", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
# ground-truth linear model that generates y from X and to which the
# models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert_equal(clf.loss_functions['huber'][1], 0.1)
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regexp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regexp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non-regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non-regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
MaxKellermann/xbmc | addons/metadata.demo.artists/demo.py | 39 | 5674 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import xbmcplugin,xbmcgui,xbmc,xbmcaddon
import os,sys,urllib
def get_params():
# Parse the plugin query string (sys.argv[2], e.g. '?action=find&artist=x')
# into a dict of key/value pairs; always returns a dict.
param = {}
paramstring = sys.argv[2]
if len(paramstring) >= 2:
cleanedparams = paramstring.replace('?', '')
if cleanedparams.endswith('/'):
cleanedparams = cleanedparams[:-1]
pairsofparams = cleanedparams.split('&')
for pair in pairsofparams:
splitparams = pair.split('=')
if len(splitparams) == 2:
param[splitparams[0]] = splitparams[1]
return param
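# Illustrative call (hypothetical input): with sys.argv[2] set to
# '?action=find&artist=Queen', get_params() returns
# {'action': 'find', 'artist': 'Queen'}.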
params=get_params()
action = None
try:
action = urllib.unquote_plus(params["action"])
except:
pass
if action == 'find':
artist = ''
try:
artist = urllib.unquote_plus(params["artist"])
except:
pass
print('Find artist with name %s' %(artist))
liz=xbmcgui.ListItem('Demo artist 1', thumbnailImage='DefaultAlbum.png', offscreen=True)
liz.setProperty('artist.genre', 'rock / pop')
liz.setProperty('artist.born', '2002')
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url="/path/to/artist", listitem=liz, isFolder=True)
liz=xbmcgui.ListItem('Demo artist 2', thumbnailImage='DefaultAlbum.png', offscreen=True)
liz.setProperty('artist.genre', 'classical / jazz')
liz.setProperty('artist.born', '2012')
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url="/path/to/artist2", listitem=liz, isFolder=True)
elif action == 'resolveid':
liz=xbmcgui.ListItem(path='/path/to/artist2', offscreen=True)
xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=liz)
elif action == 'getdetails':
url=urllib.unquote_plus(params["url"])
print('Artist with url %s' %(url))
if url == '/path/to/artist':
liz=xbmcgui.ListItem('Demo artist 1', offscreen=True)
liz.setProperty('artist.musicbrainzid', '123')
liz.setProperty('artist.genre', 'rock / pop')
liz.setProperty('artist.styles', 'heavy / light')
liz.setProperty('artist.moods', 'angry / happy')
liz.setProperty('artist.years_active', '1980 / 2012')
liz.setProperty('artist.instruments', 'guitar / drums')
liz.setProperty('artist.born', '1/1/2001')
liz.setProperty('artist.formed', '1980')
liz.setProperty('artist.biography', 'Wrote lots of crap. Likes to torture cats.')
liz.setProperty('artist.died', 'Tomorrow.')
liz.setProperty('artist.disbanded', 'Dec 21 2012')
liz.setProperty('artist.fanarts', '2')
liz.setProperty('artist.fanart1.url', 'DefaultBackFanart.png')
liz.setProperty('artist.fanart1.preview', 'DefaultBackFanart.png')
liz.setProperty('artist.fanart1.dim', '720')
liz.setProperty('artist.fanart2.url', '/home/akva/Pictures/hawaii-shirt.png')
liz.setProperty('artist.fanart2.preview', '/home/akva/Pictures/hawaii-shirt.png')
liz.setProperty('artist.fanart2.dim', '1080')
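# (Convention used by this scraper: 'artist.fanarts' holds the count N and
# each 'artist.fanart<i>.*' property describes entry i; the same indexed
# pattern is used below for 'artist.albums' and 'artist.thumbs'.)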
liz.setProperty('artist.albums', '2')
liz.setProperty('artist.album1.title', 'Demo album 1')
liz.setProperty('artist.album1.year', '2002')
liz.setProperty('artist.album2.title', 'Demo album 2')
liz.setProperty('artist.album2.year', '2007')
liz.setProperty('artist.thumbs', '2')
liz.setProperty('artist.thumb1.url', 'DefaultBackFanart.png')
liz.setProperty('artist.thumb1.aspect', '1.78')
liz.setProperty('artist.thumb2.url', '/home/akva/Pictures/hawaii-shirt.png')
liz.setProperty('artist.thumb2.aspect', '2.35')
xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=liz)
if url == '/path/to/artist2':
liz=xbmcgui.ListItem('Demo artist 2', thumbnailImage='DefaultAlbum.png', offscreen=True)
liz.setProperty('artist.musicbrainzid', '456')
liz.setProperty('artist.genre', 'classical / jazz')
liz.setProperty('artist.styles', 'morbid / funny')
liz.setProperty('artist.moods', 'fast / dance')
liz.setProperty('artist.years_active', '1990 / 2016')
liz.setProperty('artist.instruments', 'bass / flute')
liz.setProperty('artist.born', '2/2/1971')
liz.setProperty('artist.formed', '1990')
liz.setProperty('artist.biography', 'Tortured lots of cats. Likes crap.')
liz.setProperty('artist.died', 'Yesterday.')
liz.setProperty('artist.disbanded', 'Nov 20 1980')
liz.setProperty('artist.fanarts', '2')
liz.setProperty('artist.fanart1.thumb', 'DefaultBackFanart.png')
liz.setProperty('artist.fanart1.dim', '720')
liz.setProperty('artist.fanart2.thumb', '/home/akva/Pictures/gnome-tshirt.png')
liz.setProperty('artist.fanart2.dim', '1080')
liz.setProperty('artist.albums', '2')
liz.setProperty('artist.album1.title', 'Demo album 1')
liz.setProperty('artist.album1.year', '2002')
liz.setProperty('artist.album2.title', 'Demo album 2')
liz.setProperty('artist.album2.year', '2005')
liz.setProperty('artist.thumbs', '2')
liz.setProperty('artist.thumb1.url', 'DefaultBackFanart.png')
liz.setProperty('artist.thumb1.aspect', '1.78')
liz.setProperty('artist.thumb2.url', '/home/akva/Pictures/hawaii-shirt.png')
liz.setProperty('artist.thumb2.aspect', '2.35')
xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=liz)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 |
robocomp/robocomp | tools/rcmonitor/examples/imu.py | 1 | 5090 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 by RoboLab - University of Extremadura
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import Ice, sys, math, traceback
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class C(QWidget):
def __init__(self, endpoint, modules):
QWidget.__init__(self)
self.Lista=list();
self.ic = Ice.initialize(sys.argv)
self.mods = modules
self.prx = self.ic.stringToProxy(endpoint)
self.proxy = self.mods['RoboCompIMU'].IMUPrx.checkedCast(self.prx)
self.accLabelx = QLabel("Acc X", self)
self.accLabelx.show()
self.accLabelx.move(5,8)
self.green=QColor("green")
self.accx = QLCDNumber(self)
self.accx.show()
self.accx.setSegmentStyle(QLCDNumber.Flat)
self.accx.move(45,8)
self.accLabely = QLabel("Acc Y", self)
self.accLabely.show()
self.accLabely.move(5,30)
self.accy = QLCDNumber(self)
self.accy.show()
self.accy.setSegmentStyle(QLCDNumber.Flat)
self.accy.move(45,30)
self.accLabelz = QLabel("Acc Z", self)
self.accLabelz.show()
self.accLabelz.move(5,52)
self.accz = QLCDNumber(self)
self.accz.show()
self.accz.setSegmentStyle(QLCDNumber.Flat)
self.accz.move(45,52)
self.velLabelx = QLabel("gyrX", self)
self.velLabelx.show()
self.velLabelx.move(130,8)
self.green=QColor("green")
self.gyrx = QLCDNumber(self)
self.gyrx.show()
self.gyrx.setSegmentStyle(QLCDNumber.Flat)
self.gyrx.move(165,8)
self.velLabely = QLabel("gyrY", self)
self.velLabely.show()
self.velLabely.move(130,30)
self.green=QColor("green")
self.gyry = QLCDNumber(self)
self.gyry.show()
self.gyry.setSegmentStyle(QLCDNumber.Flat)
self.gyry.move(165,30)
self.velLabelz = QLabel("gyrZ", self)
self.velLabelz.show()
self.velLabelz.move(130,52)
self.green=QColor("green")
self.gyrz = QLCDNumber(self)
self.gyrz.show()
self.gyrz.setSegmentStyle(QLCDNumber.Flat)
self.gyrz.move(165,52)
self.magLabelx = QLabel("MagX", self)
self.magLabelx.show()
self.magLabelx.move(265,8)
self.green=QColor("green")
self.magx = QLCDNumber(self)
self.magx.show()
self.magx.setSegmentStyle(QLCDNumber.Flat)
self.magx.move(305,8)
self.magLabely = QLabel("MagY", self)
self.magLabely.show()
self.magLabely.move(265,30)
self.green=QColor("green")
self.magy = QLCDNumber(self)
self.magy.show()
self.magy.setSegmentStyle(QLCDNumber.Flat)
self.magy.move(305,30)
self.magLabelz = QLabel("MagZ", self)
self.magLabelz.show()
self.magLabelz.move(265,52)
self.green=QColor("green")
self.magz = QLCDNumber(self)
self.magz.show()
self.magz.setSegmentStyle(QLCDNumber.Flat)
self.magz.move(305,52)
self.rollLabel = QLabel("ROLL", self)
self.rollLabel.show()
self.rollLabel.move(400,8)
self.green=QColor("green")
self.roll = QLCDNumber(self)
self.roll.show()
self.roll.setSegmentStyle(QLCDNumber.Flat)
self.roll.move(435,8)
self.pitchLabel = QLabel("PITCH", self)
self.pitchLabel.show()
self.pitchLabel.move(400,30)
self.green=QColor("green")
self.pitch = QLCDNumber(self)
self.pitch.show()
self.pitch.setSegmentStyle(QLCDNumber.Flat)
self.pitch.move(435,30)
self.yawLabel = QLabel("YAW", self)
self.yawLabel.show()
self.yawLabel.move(400,52)
self.green=QColor("green")
self.yaw = QLCDNumber(self)
self.yaw.show()
self.yaw.setSegmentStyle(QLCDNumber.Flat)
self.yaw.move(435,52)
self.tempLabel = QLabel("TEMP", self)
self.tempLabel.show()
self.tempLabel.move(535,52)
self.green=QColor("green")
self.temp = QLCDNumber(self)
self.temp.show()
self.temp.setSegmentStyle(QLCDNumber.Flat)
self.temp.move(670,52)
self.button = QPushButton("reset", self)
self.button.move(600,20)
self.button.clicked.connect(self.resetSlot)
self.button.show()
self.show()
def resetSlot(self):
self.proxy.resetImu()
def job(self):
self.a = self.proxy.getDataImu( )
self.accx.display( self.a.acc.XAcc )
self.accy.display( self.a.acc.YAcc )
self.accz.display( self.a.acc.ZAcc )
self.gyrx.display( self.a.gyro.XGyr )
self.gyry.display( self.a.gyro.YGyr )
self.gyrz.display( self.a.gyro.ZGyr )
self.magx.display( self.a.mag.XMag )
self.magy.display( self.a.mag.YMag )
self.magz.display( self.a.mag.ZMag )
self.roll.display( self.a.rot.Roll )
self.pitch.display( self.a.rot.Pitch )
self.yaw.display( self.a.rot.Yaw )
self.temp.display( self.a.temperature )
| gpl-3.0 |
LambdaCast/LambdaCast | portal/BitTornadoABC/BitTornado/ServerPortHandler.py | 2 | 5780 | # Written by John Hoffman
# see LICENSE.txt for license information
from cStringIO import StringIO
#from RawServer import RawServer
try:
True
except:
True = 1
False = 0
from BT1.Encrypter import protocol_name
default_task_id = []
class SingleRawServer:
def __init__(self, info_hash, multihandler, doneflag, protocol):
self.info_hash = info_hash
self.doneflag = doneflag
self.protocol = protocol
self.multihandler = multihandler
self.rawserver = multihandler.rawserver
self.finished = False
self.running = False
self.handler = None
self.taskqueue = []
def shutdown(self):
if not self.finished:
self.multihandler.shutdown_torrent(self.info_hash)
def _shutdown(self):
if not self.finished:
self.finished = True
self.running = False
self.rawserver.kill_tasks(self.info_hash)
if self.handler:
self.handler.close_all()
def _external_connection_made(self, c, options, already_read):
if self.running:
c.set_handler(self.handler)
self.handler.externally_handshaked_connection_made(
c, options, already_read)
### RawServer functions ###
def add_task(self, func, delay=0, id = default_task_id):
if id is default_task_id:
id = self.info_hash
if not self.finished:
self.rawserver.add_task(func, delay, id)
# def bind(self, port, bind = '', reuse = False):
# pass # not handled here
def start_connection(self, dns, handler = None):
if not handler:
handler = self.handler
c = self.rawserver.start_connection(dns, handler)
return c
# def listen_forever(self, handler):
# pass # don't call with this
def start_listening(self, handler):
self.handler = handler
self.running = True
return self.shutdown # obviously, doesn't listen forever
def is_finished(self):
return self.finished
def get_exception_flag(self):
return self.rawserver.get_exception_flag()
class NewSocketHandler: # hand a new socket off where it belongs
def __init__(self, multihandler, connection):
self.multihandler = multihandler
self.connection = connection
connection.set_handler(self)
self.closed = False
self.buffer = StringIO()
self.complete = False
self.next_len, self.next_func = 1, self.read_header_len
self.multihandler.rawserver.add_task(self._auto_close, 15)
def _auto_close(self):
if not self.complete:
self.close()
def close(self):
if not self.closed:
self.connection.close()
self.closed = True
# header format:
# connection.write(chr(len(protocol_name)) + protocol_name +
# (chr(0) * 8) + self.encrypter.download_id + self.encrypter.my_id)
# copied from Encrypter and modified
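# Hedged walk-through of the reads below: for the standard protocol_name
# 'BitTorrent protocol' (19 bytes) the peer sends
# chr(19) + 'BitTorrent protocol' + 8 reserved bytes + 20-byte info_hash,
# so the state machine reads 1 byte (length), 19 bytes (protocol string),
# 8 bytes (options), then 20 bytes (download id) before dispatching.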
def read_header_len(self, s):
l = ord(s)
return l, self.read_header
def read_header(self, s):
self.protocol = s
return 8, self.read_reserved
def read_reserved(self, s):
self.options = s
return 20, self.read_download_id
def read_download_id(self, s):
if s in self.multihandler.singlerawservers:
if self.multihandler.singlerawservers[s].protocol == self.protocol:
return True
return None
def data_came_in(self, garbage, s):
while 1:
if self.closed:
return
i = self.next_len - self.buffer.tell()
if i > len(s):
self.buffer.write(s)
return
self.buffer.write(s[:i])
s = s[i:]
m = self.buffer.getvalue()
self.buffer.reset()
self.buffer.truncate()
try:
x = self.next_func(m)
except:
self.next_len, self.next_func = 1, self.read_dead
raise
if x is None:
self.close()
return
if x == True: # ready to process
self.multihandler.singlerawservers[m]._external_connection_made(
self.connection, self.options, s)
self.complete = True
return
self.next_len, self.next_func = x
def connection_flushed(self, ss):
pass
def connection_lost(self, ss):
self.closed = True
class MultiHandler:
def __init__(self, rawserver, doneflag):
self.rawserver = rawserver
self.masterdoneflag = doneflag
self.singlerawservers = {}
self.connections = {}
self.taskqueues = {}
def newRawServer(self, info_hash, doneflag, protocol=protocol_name):
new = SingleRawServer(info_hash, self, doneflag, protocol)
self.singlerawservers[info_hash] = new
return new
def shutdown_torrent(self, info_hash):
self.singlerawservers[info_hash]._shutdown()
del self.singlerawservers[info_hash]
def listen_forever(self):
self.rawserver.listen_forever(self)
for srs in self.singlerawservers.values():
srs.finished = True
srs.running = False
srs.doneflag.set()
### RawServer handler functions ###
# be wary of name collisions
def external_connection_made(self, ss):
NewSocketHandler(self, ss)
| bsd-2-clause |
santiavenda2/waliki | waliki_project/waliki_project/settings.py | 3 | 3424 | """
Django settings for waliki_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jk-=be97@zpbdrl6a89%e*b7x5#ir&8kb#o1@$u5u$10fp#w^l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.request",
"waliki.context_processors.settings"
)
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'waliki_project',
'waliki',
'waliki.git',
'waliki.pdf',
'waliki.slides',
'waliki.attachments',
'waliki.togetherjs',
'analytical',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.twitter',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'waliki_project.urls'
WSGI_APPLICATION = 'waliki_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
SENDFILE_BACKEND = 'sendfile.backends.simple'
SENDFILE_ROOT = 'protected_downloads/protected'
SENDFILE_URL = '/protected'
WALIKI_LOGGED_USER_PERMISSIONS = ('view_page', 'add_page', 'change_page', 'delete_page')
GOOGLE_ANALYTICS_PROPERTY_ID = 'UA-12345-6'
try:
from .local_settings import * # noqa
except:
pass
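# Illustrative local_settings.py override (hypothetical values):
# DEBUG = False
# ALLOWED_HOSTS = ['wiki.example.com']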
| bsd-3-clause |
frankyrumple/ope | libs/gluon/contrib/login_methods/extended_login_form.py | 44 | 3526 | #!/usr/bin/env python
# coding: utf8
"""
ExtendedLoginForm extends the normal web2py login form with one additional
login method, so the user can choose between the built-in login and the
extended login method.
"""
from gluon import current, DIV
class ExtendedLoginForm(object):
"""
Put extended_login_form under web2py/gluon/contrib/login_methods folder.
Then inside your model where defines the auth:
auth = Auth(globals(),db) # authentication/authorization
...
auth.define_tables() # You might like to put the code after auth.define_tables
... # if the alt_login_form deals with tables of auth.
alt_login_form = RPXAccount(request,
api_key="...",
domain="...",
url = "http://localhost:8000/%s/default/user/login" % request.application)
extended_login_form = ExtendedLoginForm(
auth, alt_login_form, signals=['token'])
auth.settings.login_form = extended_login_form
Note:
Since rpx_account doesn't create a password for the user, you
might need to provide a way for the user to create a password
so that the normal login still works.
"""
def __init__(self,
auth,
alt_login_form,
signals=[],
login_arg='login'
):
self.auth = auth
self.alt_login_form = alt_login_form
self.signals = signals
self.login_arg = login_arg
def get_user(self):
"""
Delegate the get_user to alt_login_form.get_user.
"""
if hasattr(self.alt_login_form, 'get_user'):
return self.alt_login_form.get_user()
return None # let gluon.tools.Auth.get_or_create_user do the rest
def login_url(self, next):
"""
Optional implement for alt_login_form.
In normal case, this should be replaced by get_user, and never get called.
"""
if hasattr(self.alt_login_form, 'login_url'):
return self.alt_login_form.login_url(next)
return self.auth.settings.login_url
def logout_url(self, next):
"""
Optional implement for alt_login_form.
Called if bool(alt_login_form.get_user) is True.
If alt_login_form implemented logout_url function, it will return that function call.
"""
if hasattr(self.alt_login_form, 'logout_url'):
return self.alt_login_form.logout_url(next)
return next
def login_form(self):
"""
Combine the auth() form with alt_login_form.
If signals are set and a parameter in request matches any signals,
it will return the call of alt_login_form.login_form instead.
So alt_login_form can handle some particular situations, for example,
multiple steps of OpenID login inside alt_login_form.login_form.
Otherwise it will render the normal login form combined with
alt_login_form.login_form.
"""
request = current.request
args = request.args
if (self.signals and
any([True for signal in self.signals if signal in request.vars])
):
return self.alt_login_form.login_form()
self.auth.settings.login_form = self.auth
form = DIV(self.auth())
self.auth.settings.login_form = self
form.components.append(self.alt_login_form.login_form())
return form
| mit |
rjschwei/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/policy/models/policy_assignment.py | 1 | 1774 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PolicyAssignment(Model):
"""The policy definition.
:param display_name: The display name of the policy assignment.
:type display_name: str
:param policy_definition_id: The ID of the policy definition.
:type policy_definition_id: str
:param scope: The scope for the policy assignment.
:type scope: str
:param id: The ID of the policy assignment.
:type id: str
:param type: The type of the policy assignment.
:type type: str
:param name: The name of the policy assignment.
:type name: str
"""
_attribute_map = {
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'policy_definition_id': {'key': 'properties.policyDefinitionId', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, display_name=None, policy_definition_id=None, scope=None, id=None, type=None, name=None):
self.display_name = display_name
self.policy_definition_id = policy_definition_id
self.scope = scope
self.id = id
self.type = type
self.name = name
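# Minimal usage sketch (identifier values are illustrative, not taken from
# the SDK docs):
# assignment = PolicyAssignment(
# display_name='Audit VMs',
# policy_definition_id='/providers/Microsoft.Authorization/policyDefinitions/audit-vms',
# scope='/subscriptions/<subscription-id>')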
| mit |
guoxu3/oms_backend | oms/handlers/initialize.py | 1 | 3953 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
initialize handlers
"""
import tornado.web
import tornado.escape
import tornado.ioloop
from lib import verify, encrypt, mail
from db import db_machine
from lib.salt_api import SaltAPI as sapi
import json
import check
from lib.logger import log
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor
class InitializeHandler(tornado.web.RequestHandler):
executor = ThreadPoolExecutor(5)
def data_received(self, chunk):
pass
def __init__(self, application, request, **kwargs):
super(InitializeHandler, self).__init__(application, request, **kwargs)
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with, content-type")
self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS, DELETE')
self.token = self.get_secure_cookie("access_token")
self.handler_permission = '8'
self.get_permission = '8.1'
self.post_permission = '8.2'
def get(self):
ip = self.get_argument('ip', None)
software = self.get_argument('software', None)
status = self.get_argument('status', 0)
result = db_machine.update_initialize_status(ip, software, status)
if result:
ok = True
info = 'Update initialize status successful'
else:
ok = False
info = 'Update initialize status failed'
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
def post(self):
post_initialize_permission = '8.2.1'
post_install_permission = '8.2.2'
ok, info = check.check_login(self.token)
if not ok:
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
return
ok, info = check.check_content_type(self.request)
if not ok:
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
return
body = json.loads(self.request.body)
action, data = body['action'], body['data']
if action == 'initialize':
local_permission_list = [self.handler_permission, self.post_permission, post_initialize_permission]
ok, info, _ = verify.has_permission(self.token, local_permission_list)
if not ok:
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
return
self.machine_initialize(data['ip'])  # run_on_executor submits this to the thread pool
ok = True
info = "Initializing..."
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
return
if action == 'install':
local_permission_list = [self.handler_permission, self.post_permission, post_install_permission]
ok, info, _ = verify.has_permission(self.token, local_permission_list)
if not ok:
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
return
self.install_software(data['ip'], data['software'])  # run_on_executor submits this to the thread pool
ok = True
info = 'Software installing...'
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
return
ok = False
info = 'Unsupported task action'
self.finish(tornado.escape.json_encode({'ok': ok, 'info': info}))
@run_on_executor
def machine_initialize(self, ip):
result = sapi.run_script([ip], 'salt://scripts/initialize.sh', 'initialize')
log.info(result)
@run_on_executor
def install_software(self, ip, software):
result = sapi.run_script([ip], 'salt://scripts/install_software.sh', software)
log.info(result)
def options(self):
pass
handlers = [
('/api/initialize', InitializeHandler),
]
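# Illustrative POST body for InitializeHandler (field names taken from the
# handler above; values are hypothetical):
# {"action": "install", "data": {"ip": "10.0.0.5", "software": "nginx"}}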
| mit |
jumpstarter-io/horizon | openstack_dashboard/dashboards/project/loadbalancers/tables.py | 2 | 11905 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils import http
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
class AddPoolLink(tables.LinkAction):
name = "addpool"
verbose_name = _("Add Pool")
url = "horizon:project:loadbalancers:addpool"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool"),)
class AddVipLink(tables.LinkAction):
name = "addvip"
verbose_name = _("Add VIP")
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_vip"),)
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:addvip",
kwargs={'pool_id': pool.id})
return base_url
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class AddMemberLink(tables.LinkAction):
name = "addmember"
verbose_name = _("Add Member")
url = "horizon:project:loadbalancers:addmember"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_member"),)
class AddMonitorLink(tables.LinkAction):
name = "addmonitor"
verbose_name = _("Add Monitor")
url = "horizon:project:loadbalancers:addmonitor"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_health_monitor"),)
class DeleteVipLink(tables.DeleteAction):
name = "deletevip"
action_present = _("Delete")
action_past = _("Scheduled deletion of %(data_type)s")
data_type_singular = _("VIP")
data_type_plural = _("VIPs")
policy_rules = (("network", "delete_vip"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class DeletePoolLink(tables.DeleteAction):
name = "deletepool"
action_present = _("Delete")
action_past = _("Scheduled deletion of %(data_type)s")
data_type_singular = _("Pool")
data_type_plural = _("Pools")
policy_rules = (("network", "delete_pool"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def allowed(self, request, datum=None):
if datum and datum.vip_id:
return False
return True
class DeleteMonitorLink(tables.DeleteAction):
name = "deletemonitor"
action_present = _("Delete")
action_past = _("Scheduled deletion of %(data_type)s")
data_type_singular = _("Monitor")
data_type_plural = _("Monitors")
policy_rules = (("network", "delete_health_monitor"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
class DeleteMemberLink(tables.DeleteAction):
name = "deletemember"
action_present = _("Delete")
action_past = _("Scheduled deletion of %(data_type)s")
data_type_singular = _("Member")
data_type_plural = _("Members")
policy_rules = (("network", "delete_member"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
class UpdatePoolLink(tables.LinkAction):
name = "updatepool"
verbose_name = _("Edit Pool")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_pool"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatepool",
kwargs={'pool_id': pool.id})
return base_url
class UpdateVipLink(tables.LinkAction):
name = "updatevip"
verbose_name = _("Edit VIP")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_vip"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def get_link_url(self, pool):
base_url = reverse("horizon:project:loadbalancers:updatevip",
kwargs={'vip_id': pool.vip_id})
return base_url
def allowed(self, request, datum=None):
if datum and not datum.vip_id:
return False
return True
class UpdateMemberLink(tables.LinkAction):
name = "updatemember"
verbose_name = _("Edit Member")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_member"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def get_link_url(self, member):
base_url = reverse("horizon:project:loadbalancers:updatemember",
kwargs={'member_id': member.id})
return base_url
class UpdateMonitorLink(tables.LinkAction):
name = "updatemonitor"
verbose_name = _("Edit Monitor")
classes = ("ajax-modal", "btn-update",)
policy_rules = (("network", "update_health_monitor"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def get_link_url(self, monitor):
base_url = reverse("horizon:project:loadbalancers:updatemonitor",
kwargs={'monitor_id': monitor.id})
return base_url
def get_vip_link(pool):
if pool.vip_id:
return reverse("horizon:project:loadbalancers:vipdetails",
args=(http.urlquote(pool.vip_id),))
else:
return None
class AddPMAssociationLink(tables.LinkAction):
name = "addassociation"
verbose_name = _("Associate Monitor")
url = "horizon:project:loadbalancers:addassociation"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "create_pool_health_monitor"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def allowed(self, request, datum=None):
try:
tenant_id = request.user.tenant_id
monitors = api.lbaas.pool_health_monitor_list(request,
tenant_id=tenant_id)
for m in monitors:
if m.id not in datum['health_monitors']:
return True
except Exception:
exceptions.handle(request,
_('Failed to retrieve health monitors.'))
return False
class DeletePMAssociationLink(tables.LinkAction):
name = "deleteassociation"
verbose_name = _("Disassociate Monitor")
url = "horizon:project:loadbalancers:deleteassociation"
classes = ("ajax-modal", "btn-danger")
icon = "remove"
policy_rules = (("network", "delete_pool_health_monitor"),)
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def allowed(self, request, datum=None):
if datum and not datum['health_monitors']:
return False
return True
class PoolsTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:loadbalancers:pooldetails")
description = tables.Column('description', verbose_name=_("Description"))
provider = tables.Column('provider', verbose_name=_("Provider"),
filters=(lambda v: filters.default(v, _('N/A')),))
subnet_name = tables.Column('subnet_name', verbose_name=_("Subnet"))
protocol = tables.Column('protocol', verbose_name=_("Protocol"))
status = tables.Column('status', verbose_name=_("Status"))
vip_name = tables.Column('vip_name', verbose_name=_("VIP"),
link=get_vip_link)
class Meta:
name = "poolstable"
verbose_name = _("Pools")
table_actions = (AddPoolLink, DeletePoolLink)
row_actions = (UpdatePoolLink, AddVipLink, UpdateVipLink,
DeleteVipLink, AddPMAssociationLink,
DeletePMAssociationLink, DeletePoolLink)
def get_pool_link(member):
return reverse("horizon:project:loadbalancers:pooldetails",
args=(http.urlquote(member.pool_id),))
def get_member_link(member):
return reverse("horizon:project:loadbalancers:memberdetails",
args=(http.urlquote(member.id),))
class MembersTable(tables.DataTable):
address = tables.Column('address',
verbose_name=_("IP Address"),
link=get_member_link,
attrs={'data-type': "ip"})
protocol_port = tables.Column('protocol_port',
verbose_name=_("Protocol Port"))
weight = tables.Column('weight',
verbose_name=_("Weight"))
pool_name = tables.Column('pool_name',
verbose_name=_("Pool"), link=get_pool_link)
status = tables.Column('status', verbose_name=_("Status"))
class Meta:
name = "memberstable"
verbose_name = _("Members")
table_actions = (AddMemberLink, DeleteMemberLink)
row_actions = (UpdateMemberLink, DeleteMemberLink)
def get_monitor_details(monitor):
if monitor.type in ('HTTP', 'HTTPS'):
return ("%(http_method)s %(url_path)s => %(codes)s" %
{'http_method': monitor.http_method,
'url_path': monitor.url_path,
'codes': monitor.expected_codes})
else:
return _("-")
class MonitorsTable(tables.DataTable):
monitor_type = tables.Column(
"type", verbose_name=_("Monitor Type"),
link="horizon:project:loadbalancers:monitordetails")
delay = tables.Column("delay", verbose_name=_("Delay"))
timeout = tables.Column("timeout", verbose_name=_("Timeout"))
max_retries = tables.Column("max_retries", verbose_name=_("Max Retries"))
details = tables.Column(get_monitor_details, verbose_name=_("Details"))
class Meta:
name = "monitorstable"
verbose_name = _("Monitors")
table_actions = (AddMonitorLink, DeleteMonitorLink)
row_actions = (UpdateMonitorLink, DeleteMonitorLink)
| apache-2.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/lib2to3/pgen2/grammar.py | 2 | 4953 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
import pickle
# Local imports
from . import token, tokenize
class Grammar(object):
"""Pgen parsing tables tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
states, each state is a list of arcs, and each
arc is a (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self):
self.symbol2number = {}
self.number2symbol = {}
self.states = []
self.dfas = {}
self.labels = [(0, "EMPTY")]
self.keywords = {}
self.tokens = {}
self.symbol2label = {}
self.start = 256
def dump(self, filename):
"""Dump the grammar tables to a pickle file."""
f = open(filename, "wb")
pickle.dump(self.__dict__, f, 2)
f.close()
def load(self, filename):
"""Load the grammar tables from a pickle file."""
f = open(filename, "rb")
d = pickle.load(f)
f.close()
self.__dict__.update(d)
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print("s2n")
pprint(self.symbol2number)
print("n2s")
pprint(self.number2symbol)
print("states")
pprint(self.states)
print("dfas")
pprint(self.dfas)
print("labels")
pprint(self.labels)
print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name)
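# Example of the resulting mapping (assuming the standard token module):
# opmap['('] == token.LPAR
# opmap['**='] == token.DOUBLESTAREQUAL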
| mit |
ledtvavs/repository.ledtv | script.module.nanscrapers/lib/nanscrapers/modules/js2py/legecy_translators/objects.py | 11 | 11176 | """ This module removes all objects/arrays from JS source code and replaces them with LVALs.
It also has a function translating a removed object/array to Python code.
Use this module just after removing constants; later move on to removing functions."""
OBJECT_LVAL = 'PyJsLvalObject%d_'
ARRAY_LVAL = 'PyJsLvalArray%d_'
from utils import *
from jsparser import *
from nodevisitor import exp_translator
import functions
from flow import KEYWORD_METHODS
def FUNC_TRANSLATOR(*a):# stupid import system in python
raise RuntimeError('Remember to set func translator. Thank you.')
def set_func_translator(ftrans):
# stupid stupid Python or Peter
global FUNC_TRANSLATOR
FUNC_TRANSLATOR = ftrans
def is_empty_object(n, last):
"""n may be the inside of block or object"""
if n.strip():
return False
# seems to be but can be empty code
last = last.strip()
markers = {')', ';',}
if not last or last[-1] in markers:
return False
return True
# todo refine this function
def is_object(n, last):
"""n may be the inside of block or object.
last is the code before object"""
if is_empty_object(n, last):
return True
if not n.strip():
return False
#Object contains lines of code so it cant be an object
if len(argsplit(n, ';'))>1:
return False
cands = argsplit(n, ',')
if not cands[-1].strip():
return True # {xxxx,} empty after last , it must be an object
for cand in cands:
cand = cand.strip()
# separate each candidate element at : in dict and check whether they are correct...
kv = argsplit(cand, ':')
if len(kv) > 2: # set the len of kv to 2 because of this stupid : expression
kv = kv[0],':'.join(kv[1:])
if len(kv)==2:
# key value pair, check whether not label or ?:
k, v = kv
if not is_lval(k.strip()):
return False
v = v.strip()
if v.startswith('function'):
continue
#will fail on label... {xxx: while {}}
if v[0]=='{': # value cant be a code block
return False
for e in KEYWORD_METHODS:
# if v starts with any statement then return false
if v.startswith(e) and len(e)<len(v) and v[len(e)] not in IDENTIFIER_PART:
return False
elif not (cand.startswith('set ') or cand.startswith('get ')):
return False
return True
def is_array(last):
#it can be prop getter
last = last.strip()
if any(endswith_keyword(last, e) for e in ['return', 'new', 'void', 'throw', 'typeof', 'in', 'instanceof']):
return True
markers = {')', ']'}
return not last or not (last[-1] in markers or last[-1] in IDENTIFIER_PART)
def remove_objects(code, count=1):
""" This function replaces objects with OBJECTS_LVALS, returns new code, replacement dict and count.
count arg is the number that should be added to the LVAL of the first replaced object
"""
replacements = {} #replacement dict
br = bracket_split(code, ['{}', '[]'])
res = ''
last = ''
for e in br:
#test whether e is an object
if e[0]=='{':
n, temp_rep, cand_count = remove_objects(e[1:-1], count)
# if e was not an object then n should not contain any :
if is_object(n, last):
#e was an object
res += ' '+OBJECT_LVAL % count
replacements[OBJECT_LVAL % count] = e
count += 1
else:
# e was just a code block but could contain objects inside
res += '{%s}' % n
count = cand_count
replacements.update(temp_rep)
elif e[0]=='[':
if is_array(last):
res += e # will be translated later
else: # prop get
n, rep, count = remove_objects(e[1:-1], count)
res += '[%s]' % n
replacements.update(rep)
else: # e does not contain any objects
res += e
last = e #needed to test for this stipid empty object
return res, replacements, count
def remove_arrays(code, count=1):
"""removes arrays and replaces them with ARRAY_LVALS
returns new code and replacement dict
*NOTE* has to be called AFTER remove objects"""
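# Worked example (hedged):
# remove_arrays("a = [1, 2];")
# -> ('a =  PyJsLvalArray1_;', {'PyJsLvalArray1_': '[1, 2]'}, 2)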
res = ''
last = ''
replacements = {}
for e in bracket_split(code, ['[]']):
if e[0]=='[':
if is_array(last):
name = ARRAY_LVAL % count
res += ' ' + name
replacements[name] = e
count += 1
else: # pseudo array. But pseudo array can contain true array. for example a[['d'][3]] has 2 pseudo and 1 true array
cand, new_replacements, count = remove_arrays(e[1:-1], count)
res += '[%s]' % cand
replacements.update(new_replacements)
else:
res += e
last = e
return res, replacements, count
def translate_object(obj, lval, obj_count=1, arr_count=1):
obj = obj[1:-1] # remove {} from both ends
obj, obj_rep, obj_count = remove_objects(obj, obj_count)
obj, arr_rep, arr_count = remove_arrays(obj, arr_count)
# functions can be defined inside objects. exp translator cant translate them.
# we have to remove them and translate with func translator
# its better explained in translate_array function
obj, hoisted, inline = functions.remove_functions(obj, all_inline=True)
assert not hoisted
gsetters_after = ''
keys = argsplit(obj)
res = []
for i, e in enumerate(keys, 1):
e = e.strip()
if e.startswith('set '):
gsetters_after += translate_setter(lval, e)
elif e.startswith('get '):
gsetters_after += translate_getter(lval, e)
elif ':' not in e:
if i<len(keys): # can happen legally only in the last element {3:2,}
raise SyntaxError('Unexpected "," in Object literal')
break
else: #Not getter, setter or elision
spl = argsplit(e, ':')
if len(spl)<2:
raise SyntaxError('Invalid Object literal: '+e)
try:
key, value = spl
except: #len(spl)> 2
print 'Unusual case ' + repr(e)
key = spl[0]
value = ':'.join(spl[1:])
key = key.strip()
if is_internal(key):
key = '%s.to_string().value' % key
else:
key = repr(key)
value = exp_translator(value)
if not value:
raise SyntaxError('Missing value in Object literal')
res.append('%s:%s' % (key, value))
res = '%s = Js({%s})\n' % (lval, ','.join(res)) + gsetters_after
# translate all the nested objects (including removed earlier functions)
for nested_name, nested_info in inline.iteritems(): # functions
nested_block, nested_args = nested_info
new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args)
res = new_def + res
for lval, obj in obj_rep.iteritems(): #objects
new_def, obj_count, arr_count = translate_object(obj, lval, obj_count, arr_count)
# add object definition BEFORE array definition
res = new_def + res
for lval, obj in arr_rep.iteritems(): # arrays
new_def, obj_count, arr_count = translate_array(obj, lval, obj_count, arr_count)
# add object definition BEFORE array definition
res = new_def + res
return res, obj_count, arr_count
def translate_setter(lval, setter):
func = 'function' + setter[3:]
try:
_, data, _ = functions.remove_functions(func)
if not data or len(data)>1:
raise Exception()
except:
raise SyntaxError('Could not parse setter: '+setter)
prop = data.keys()[0]
body, args = data[prop]
if len(args)!=1: #setter must have exactly 1 argument
raise SyntaxError('Invalid setter. It must take exactly 1 argument.')
# now messy part
res = FUNC_TRANSLATOR('setter', body, args)
res += "%s.define_own_property(%s, {'set': setter})\n"%(lval, repr(prop))
return res
def translate_getter(lval, getter):
func = 'function' + getter[3:]
try:
_, data, _ = functions.remove_functions(func)
if not data or len(data)>1:
raise Exception()
except:
raise SyntaxError('Could not parse getter: '+getter)
prop = data.keys()[0]
body, args = data[prop]
if len(args)!=0: #setter must have exactly 0 argument
raise SyntaxError('Invalid getter. It must take exactly 0 argument.')
# now messy part
res = FUNC_TRANSLATOR('getter', body, args)
res += "%s.define_own_property(%s, {'get': setter})\n"%(lval, repr(prop))
return res
def translate_array(array, lval, obj_count=1, arr_count=1):
"""array has to be any js array for example [1,2,3]
lval has to be name of this array.
Returns python code that adds lval to the PY scope it should be put before lval"""
array = array[1:-1]
array, obj_rep, obj_count = remove_objects(array, obj_count)
array, arr_rep, arr_count = remove_arrays(array, arr_count)
#functions can be also defined in arrays, this caused many problems since in Python
# functions cant be defined inside literal
# remove functions (they dont contain arrays or objects so can be translated easily)
# hoisted functions are treated like inline
array, hoisted, inline = functions.remove_functions(array, all_inline=True)
assert not hoisted
arr = []
# separate elements in array
for e in argsplit(array, ','):
# translate expressions in array PyJsLvalInline will not be translated!
e = exp_translator(e.replace('\n', ''))
arr.append(e if e else 'None')
arr = '%s = Js([%s])\n' % (lval, ','.join(arr))
#But we can have more code to add to define arrays/objects/functions defined inside this array
# translate nested objects:
# functions:
for nested_name, nested_info in inline.iteritems():
nested_block, nested_args = nested_info
new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args)
arr = new_def + arr
for lval, obj in obj_rep.iteritems():
new_def, obj_count, arr_count = translate_object(obj, lval, obj_count, arr_count)
# add object definition BEFORE array definition
arr = new_def + arr
for lval, obj in arr_rep.iteritems():
new_def, obj_count, arr_count = translate_array(obj, lval, obj_count, arr_count)
# add object definition BEFORE array definition
arr = new_def + arr
return arr, obj_count, arr_count
if __name__=='__main__':
test = 'a = {404:{494:19}}; b = 303; if () {f={:}; { }}'
#print remove_objects(test)
#print list(bracket_split(' {}'))
print
print remove_arrays('typeof a&&!db.test(a)&&!ib[(bb.exec(a)||["",""], [][[5][5]])[1].toLowerCase()])')
print is_object('', ')')
| gpl-3.0 |
simbha/mAngE-Gin | lib/django/template/loader_tags.py | 22 | 9700 | from collections import defaultdict
from django.conf import settings
from django.template.base import TemplateSyntaxError, Library, Node, TextNode,\
token_kwargs, Variable
from django.template.loader import get_template
from django.utils.safestring import mark_safe
from django.utils import six
register = Library()
BLOCK_CONTEXT_KEY = 'block_context'
class ExtendsError(Exception):
pass
class BlockContext(object):
def __init__(self):
# Dictionary of FIFO queues.
self.blocks = defaultdict(list)
def add_blocks(self, blocks):
for name, block in six.iteritems(blocks):
self.blocks[name].insert(0, block)
def pop(self, name):
try:
return self.blocks[name].pop()
except IndexError:
return None
def push(self, name, block):
self.blocks[name].append(block)
def get_block(self, name):
try:
return self.blocks[name][-1]
except IndexError:
return None
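# Hedged illustration of BlockContext semantics: blocks are added child
# template first, so pop() returns the most-derived override:
# bc = BlockContext()
# bc.add_blocks({'content': child_block}) # hypothetical BlockNodes
# bc.add_blocks({'content': parent_block})
# bc.pop('content') # -> child_block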
class BlockNode(Node):
def __init__(self, name, nodelist, parent=None):
self.name, self.nodelist, self.parent = name, nodelist, parent
def __repr__(self):
return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
def render(self, context):
block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
with context.push():
if block_context is None:
context['block'] = self
result = self.nodelist.render(context)
else:
push = block = block_context.pop(self.name)
if block is None:
block = self
# Create new block so we can store context without thread-safety issues.
block = type(self)(block.name, block.nodelist)
block.context = context
context['block'] = block
result = block.nodelist.render(context)
if push is not None:
block_context.push(self.name, push)
return result
def super(self):
if not hasattr(self, 'context'):
raise TemplateSyntaxError(
"'%s' object has no attribute 'context'. Did you use "
"{{ block.super }} in a base template?" % self.__class__.__name__
)
render_context = self.context.render_context
if (BLOCK_CONTEXT_KEY in render_context and
render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):
return mark_safe(self.render(self.context))
return ''
class ExtendsNode(Node):
must_be_first = True
def __init__(self, nodelist, parent_name, template_dirs=None):
self.nodelist = nodelist
self.parent_name = parent_name
self.template_dirs = template_dirs
self.blocks = dict((n.name, n) for n in nodelist.get_nodes_by_type(BlockNode))
def __repr__(self):
return '<ExtendsNode: extends %s>' % self.parent_name.token
def get_parent(self, context):
parent = self.parent_name.resolve(context)
if not parent:
error_msg = "Invalid template name in 'extends' tag: %r." % parent
if self.parent_name.filters or\
isinstance(self.parent_name.var, Variable):
error_msg += " Got this from the '%s' variable." %\
self.parent_name.token
raise TemplateSyntaxError(error_msg)
if hasattr(parent, 'render'):
return parent # parent is a Template object
return get_template(parent)
def render(self, context):
compiled_parent = self.get_parent(context)
if BLOCK_CONTEXT_KEY not in context.render_context:
context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
block_context = context.render_context[BLOCK_CONTEXT_KEY]
# Add the block nodes from this node to the block context
block_context.add_blocks(self.blocks)
# If this block's parent doesn't have an extends node it is the root,
# and its block nodes also need to be added to the block context.
for node in compiled_parent.nodelist:
# The ExtendsNode has to be the first non-text node.
if not isinstance(node, TextNode):
if not isinstance(node, ExtendsNode):
blocks = dict((n.name, n) for n in
compiled_parent.nodelist.get_nodes_by_type(BlockNode))
block_context.add_blocks(blocks)
break
# Call Template._render explicitly so the parser context stays
# the same.
return compiled_parent._render(context)
class IncludeNode(Node):
def __init__(self, template, *args, **kwargs):
self.template = template
self.extra_context = kwargs.pop('extra_context', {})
self.isolated_context = kwargs.pop('isolated_context', False)
super(IncludeNode, self).__init__(*args, **kwargs)
def render(self, context):
try:
template = self.template.resolve(context)
# Does this quack like a Template?
if not callable(getattr(template, 'render', None)):
# If not, we'll try get_template
template = get_template(template)
values = {
name: var.resolve(context)
for name, var in six.iteritems(self.extra_context)
}
if self.isolated_context:
return template.render(context.new(values))
with context.push(**values):
return template.render(context)
except Exception:
if settings.TEMPLATE_DEBUG:
raise
return ''
@register.tag('block')
def do_block(parser, token):
"""
Define a block that can be overridden by child templates.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0])
block_name = bits[1]
# Keep track of the names of BlockNodes found in this template, so we can
# check for duplication.
try:
if block_name in parser.__loaded_blocks:
raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))
parser.__loaded_blocks.append(block_name)
except AttributeError: # parser.__loaded_blocks isn't a list yet
parser.__loaded_blocks = [block_name]
nodelist = parser.parse(('endblock',))
# This check is kept for backwards-compatibility. See #3100.
endblock = parser.next_token()
acceptable_endblocks = ('endblock', 'endblock %s' % block_name)
if endblock.contents not in acceptable_endblocks:
parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks)
return BlockNode(block_name, nodelist)
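# Illustrative: both closing forms are accepted for a named block:
#   {% block content %}...{% endblock %}
#   {% block content %}...{% endblock content %}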
@register.tag('extends')
def do_extends(parser, token):
"""
Signal that this template extends a parent template.
This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
uses the literal value "base" as the name of the parent template to extend,
or ``{% extends variable %}`` uses the value of ``variable`` as either the
name of the parent template to extend (if it evaluates to a string) or as
the parent template itself (if it evaluates to a Template object).
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % bits[0])
return ExtendsNode(nodelist, parent_name)
@register.tag('include')
def do_include(parser, token):
"""
Loads a template and renders it with the current context. You can pass
additional context using keyword arguments.
Example::
{% include "foo/some_include" %}
{% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}
Use the ``only`` argument to exclude the current context when rendering
the included template::
{% include "foo/some_include" only %}
{% include "foo/some_include" with bar="1" only %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError(
"%r tag takes at least one argument: the name of the template to "
"be included." % bits[0]
)
options = {}
remaining_bits = bits[2:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=False)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'only':
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
isolated_context = options.get('only', False)
namemap = options.get('with', {})
return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap,
isolated_context=isolated_context)
| mit |
grg2rsr/line_scan_traces_extractor | interactive_traces_extration.py | 1 | 6843 | # -*- coding: utf-8 -*-
"""
written by Georg Raiser 20.10.2015
questions/bugs to grg2rsr@gmail.com
"""
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import scipy as sp
import sys
import os
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import tifffile
class interactive_traces_extract(object):
"""
interactively extract traces from line scan images. The image has to be in
the format x: time, y: line index, so each horizontal line of pixels
represents one scan of the traces, the line below it the next scan, etc.
"""
def __init__(self,image_path,prestim_frames=None):
self.path = image_path
## ini data
self.data = self.read_image(self.path)
self.nLines = self.data.shape[0]
self.nPlaces = self.data.shape[1]
if prestim_frames:
Fstart,Fstop = prestim_frames
bck = sp.average(self.data[Fstart:Fstop,:],axis=0)[sp.newaxis,:]
self.data = (self.data - bck) / bck
## ini UI
# Image
im_params = {'interpolation':'none',
'cmap':'jet',
'extent':[0,self.nPlaces,self.nLines,0],
'origin':'upper',
'aspect':sp.float32(self.data.shape[1]) / self.data.shape[0]}
AxesImage = plt.imshow(self.data,**im_params)
self.im_ax = AxesImage.axes
self.im_fig = AxesImage.figure
self.im_ax.set_xlabel('place [px]')
self.im_ax.set_ylabel('line number')
# coordinate calc
self.pos = int(self.nPlaces/2) # current position of the mouse pointer
self.width = 11 # width of the selection in px
self.xs = self.calc_x(self.pos,self.width) # tuple (x0,x1) bounding the slice
# add patch
rect_params = {'facecolor':'red',
'alpha':0.5}
self.Rect = Rectangle(self.xs,self.width,self.nLines,**rect_params)
self.im_ax.add_patch(self.Rect)
# extracted traces preview
self.traces_fig = plt.figure()
self.traces_ax = self.traces_fig.add_subplot(111)
tempTrace_params = {'linewidth':2,
'color':'red'}
self.tempTrace, = self.traces_ax.plot(sp.zeros(self.nLines),**tempTrace_params)
self.traces_ax.set_xlabel('line number')
if prestim_frames:
self.traces_ax.set_ylabel('dF/F')
else:
self.traces_ax.set_ylabel('intensity [au]')
## extracting info
self.coords = []
self.traces = []
# hooking up the interactive handles
self.im_fig.canvas.mpl_connect('button_press_event', self.mouse_clicked_event)
self.im_fig.canvas.mpl_connect('scroll_event',self.scroll_event)
self.im_fig.canvas.mpl_connect('motion_notify_event',self.mouse_moved_event)
self.im_fig.canvas.mpl_connect('close_event', self.close_event)
plt.show()
pass
### input output
def read_image(self,path):
""" dummy reader, to be extended for other file formats """
return tifffile.imread(path)
def write_output(self):
""" write ouput upon closing the image figure """
outpath = os.path.splitext(self.path)[0]+'_traces.csv'
if len(self.coords) > 0:
print 'writing to ' + outpath
coors = pd.DataFrame(sp.array(self.coords).T,index=['x0','x1'])
values = pd.DataFrame(sp.vstack(self.traces).T)
Df = pd.concat([coors,values])
Df.to_csv(outpath)
else:
print "exiting without saving anything"
def close_event(self,event):
self.write_output()
plt.close('all')
def calc_x(self,pos,width):
""" calculate x0, x1 (slice extent) based on current pos and width """
if width == 1:
x0 = pos
x1 = pos + 1
else:
x0 = pos - (width-1)/2
x1 = pos + (width-1)/2
return (x0,x1)
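# Illustrative: pos=50, width=11 -> (45, 55); pos=50, width=1 -> (50, 51).
# Note that x1 serves as an exclusive bound when the slice is taken.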
def scroll_event(self,event):
""" changes width of slice """
if event.button == 'up':
self.width += 2
if event.button == 'down':
self.width -= 2
self.width = sp.clip(self.width,1,self.nPlaces)
self.xs = self.calc_x(self.pos,self.width)
self.Rect.set_xy((self.xs[0] ,0))
self.Rect.set_width(self.width)
self.update()
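# Note: the width changes in steps of 2, so it stays odd (up to the
# clipping above) and the slice remains centered on the pointer.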
def mouse_moved_event(self,event):
""" x position of mouse determines center of slice """
if event.inaxes == self.im_ax:
self.pos = int(event.xdata)
self.update()
def mouse_clicked_event(self,event):
""" middle button click slices """
if event.button==2:
self.coords.append(self.xs)
self.traces.append(self.slice_trace(*self.xs))
self.traces_ax.plot(self.slice_trace(*self.xs),lw=1,color='grey')
rect = Rectangle((self.xs[0],0),self.width,self.nLines,facecolor='grey',alpha=0.5)
self.im_ax.add_patch(rect)
def slice_trace(self,x0,x1):
sliced = sp.average(self.data[:,x0:x1],axis=1)
return sliced
def update(self):
""" UI update """
# calc new pos
self.xs = self.calc_x(self.pos,self.width)
# update rect
self.Rect.set_xy((self.xs[0] ,0))
# get new slice
self.tempTrace.set_ydata(self.slice_trace(*self.xs))
# update traces preview
self.traces_ax.relim()
self.traces_ax.autoscale_view(True,True,True)
self.traces_fig.canvas.draw()
# update figure
self.im_fig.canvas.draw()
if __name__ == '__main__':
### testing
# path = '/home/georg/python/line_scan_traces_extractor/test_data/ant2_L1_Sum17.lsm'
# prestim_frames = (0,40)
doc = """ A small python tool to interactively extract time traces from functional imaging line scans. The Image data shape has to be x = place and y = repetition. Usage: run this file with the following arguments <path_to_file> <start> <stop>. Start and stop are optional and mark the index bounds used for background used in dF/F calculation. If ommited, raw values are used."""
### nontesting
if len(sys.argv) == 1:
print doc
sys.exit()
if len(sys.argv) == 2:
path = sys.argv[1]
prestim_frames = None
if len(sys.argv) == 4:
path = sys.argv[1]
prestim_frames = (int(sys.argv[2]),int(sys.argv[3]))
interactive_traces_extract(path,prestim_frames=prestim_frames)
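# Example invocation (file name is hypothetical):
#   python interactive_traces_extration.py scan.tif 0 40
# loads scan.tif, uses frames 0-40 as the pre-stimulus baseline for dF/F,
# and writes scan_traces.csv next to the input when the image window is
# closed; omit start/stop to work on raw intensities.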
| gpl-2.0 |
AladdinSonni/youtube-dl | youtube_dl/extractor/teachertube.py | 148 | 4651 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
qualities,
determine_ext,
)
class TeacherTubeIE(InfoExtractor):
IE_NAME = 'teachertube'
IE_DESC = 'teachertube.com videos'
_VALID_URL = r'https?://(?:www\.)?teachertube\.com/(viewVideo\.php\?video_id=|music\.php\?music_id=|video/(?:[\da-z-]+-)?|audio/)(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.teachertube.com/viewVideo.php?video_id=339997',
'md5': 'f9434ef992fd65936d72999951ee254c',
'info_dict': {
'id': '339997',
'ext': 'mp4',
'title': 'Measures of dispersion from a frequency table',
'description': 'Measures of dispersion from a frequency table',
'thumbnail': 're:http://.*\.jpg',
},
}, {
'url': 'http://www.teachertube.com/viewVideo.php?video_id=340064',
'md5': '0d625ec6bc9bf50f70170942ad580676',
'info_dict': {
'id': '340064',
'ext': 'mp4',
'title': 'How to Make Paper Dolls _ Paper Art Projects',
'description': 'Learn how to make paper dolls in this simple',
'thumbnail': 're:http://.*\.jpg',
},
}, {
'url': 'http://www.teachertube.com/music.php?music_id=8805',
'md5': '01e8352006c65757caf7b961f6050e21',
'info_dict': {
'id': '8805',
'ext': 'mp3',
'title': 'PER ASPERA AD ASTRA',
'description': 'RADIJSKA EMISIJA ZRAKOPLOVNE TEHNI?KE ?KOLE P',
},
}, {
'url': 'http://www.teachertube.com/video/intro-video-schleicher-297790',
'md5': '9c79fbb2dd7154823996fc28d4a26998',
'info_dict': {
'id': '297790',
'ext': 'mp4',
'title': 'Intro Video - Schleicher',
'description': 'Intro Video - Why to flip, how flipping will',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_meta('title', webpage, 'title', fatal=True)
TITLE_SUFFIX = ' - TeacherTube'
if title.endswith(TITLE_SUFFIX):
title = title[:-len(TITLE_SUFFIX)].strip()
description = self._html_search_meta('description', webpage, 'description')
if description:
description = description.strip()
quality = qualities(['mp3', 'flv', 'mp4'])
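# Illustrative: qualities(['mp3', 'flv', 'mp4']) ranks extensions in
# ascending preference, so an .mp4 URL sorts above .flv and .mp3 after
# self._sort_formats below.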
media_urls = re.findall(r'data-contenturl="([^"]+)"', webpage)
media_urls.extend(re.findall(r'var\s+filePath\s*=\s*"([^"]+)"', webpage))
media_urls.extend(re.findall(r'\'file\'\s*:\s*["\']([^"\']+)["\'],', webpage))
formats = [
{
'url': media_url,
'quality': quality(determine_ext(media_url))
} for media_url in set(media_urls)
]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': self._html_search_regex(r'\'image\'\s*:\s*["\']([^"\']+)["\']', webpage, 'thumbnail'),
'formats': formats,
'description': description,
}
class TeacherTubeUserIE(InfoExtractor):
IE_NAME = 'teachertube:user:collection'
IE_DESC = 'teachertube.com user and collection videos'
_VALID_URL = r'https?://(?:www\.)?teachertube\.com/(user/profile|collection)/(?P<user>[0-9a-zA-Z]+)/?'
_MEDIA_RE = r'''(?sx)
class="?sidebar_thumb_time"?>[0-9:]+</div>
\s*
<a\s+href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)"
'''
_TEST = {
'url': 'http://www.teachertube.com/user/profile/rbhagwati2',
'info_dict': {
'id': 'rbhagwati2'
},
'playlist_mincount': 179,
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user_id = mobj.group('user')
urls = []
webpage = self._download_webpage(url, user_id)
urls.extend(re.findall(self._MEDIA_RE, webpage))
pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[:-1]
for p in pages:
more = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, p)
webpage = self._download_webpage(more, user_id, 'Downloading page %s/%s' % (p, len(pages)))
video_urls = re.findall(self._MEDIA_RE, webpage)
urls.extend(video_urls)
entries = [self.url_result(vurl, 'TeacherTube') for vurl in urls]
return self.playlist_result(entries, user_id)
| unlicense |
stormi/weblate | weblate/trans/views/charts.py | 10 | 4145 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Charting library for Weblate.
'''
from weblate.trans.models import Change
from weblate.lang.models import Language
from weblate.trans.views.helper import get_project_translation
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.utils.translation import pgettext
import json
def get_json_stats(request, days, step, project=None, subproject=None,
lang=None, user=None):
"""
Parse JSON stats URL params and return the matching change statistics.
"""
if project is None and lang is None and user is None:
project = None
subproject = None
translation = None
language = None
user = None
elif user is not None:
project = None
subproject = None
translation = None
language = None
user = get_object_or_404(User, username=user)
elif project is None:
project = None
subproject = None
translation = None
language = get_object_or_404(Language, code=lang)
user = None
else:
# Process parameters
project, subproject, translation = get_project_translation(
request,
project,
subproject,
lang
)
language = None
user = None
# Get actual stats
return Change.objects.base_stats(
days,
step,
project,
subproject,
translation,
language,
user
)
def yearly_activity(request, project=None, subproject=None, lang=None,
user=None):
"""
Returns yearly activity for matching changes as json.
"""
activity = get_json_stats(
request, 364, 7,
project, subproject, lang, user
)
# Format
serie = []
labels = []
month = -1
for item in activity:
serie.append(item[1])
if month != item[0].month:
labels.append(
pgettext(
'Format string for yearly activity chart',
'{month}/{year}'
).format(
month=item[0].month,
year=item[0].year,
)
)
month = item[0].month
else:
labels.append('')
return HttpResponse(
content_type='application/json',
content=json.dumps({'series': [serie], 'labels': labels})
)
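# Illustrative: for weekly data points spanning May-June 2015 the labels
# list comes out as ['5/2015', '', '', '', '6/2015', ...] - exactly one
# label per month change.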
def monthly_activity(request, project=None, subproject=None, lang=None,
user=None):
"""
Returns monthly activity for matching changes as json.
"""
activity = get_json_stats(
request, 31, 1,
project, subproject, lang, user
)
# Format
serie = []
labels = []
for pos, item in enumerate(activity):
serie.append(item[1])
if pos % 5 == 0:
labels.append(
pgettext(
'Format string for monthly activity chart',
'{day}/{month}'
).format(
day=item[0].day,
month=item[0].month,
year=item[0].year,
)
)
else:
labels.append('')
return HttpResponse(
content_type='application/json',
content=json.dumps({'series': [serie], 'labels': labels})
)
| gpl-3.0 |
alanjw/GreenOpenERP-Win-X86 | python/Lib/site-packages/docutils/utils/math/__init__.py | 160 | 1683 | # :Id: $Id: __init__.py 7218 2011-11-08 17:42:40Z milde $
# :Author: Guenter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
"""
This is the Docutils (Python Documentation Utilities) "math" sub-package.
It contains various modules for conversion between different math formats
(LaTeX, MathML, HTML).
:math2html: LaTeX math -> HTML conversion from eLyXer
:latex2mathml: LaTeX math -> presentational MathML
:unichar2tex: Unicode character to LaTeX math translation table
:tex2unichar: LaTeX math to Unicode character translation dictionaries
"""
# helpers for Docutils math support
# =================================
def pick_math_environment(code, numbered=False):
"""Return the right math environment to display `code`.
The test simply looks for line-breaks (``\\``) outside environments.
Multi-line formulae are set with ``align``, one-liners with
``equation``.
If `numbered` evaluates to ``False``, the "starred" versions are used
to suppress numbering.
"""
# cut out environment content:
chunks = code.split(r'\begin{')
toplevel_code = ''.join([chunk.split(r'\end{')[-1]
for chunk in chunks])
if toplevel_code.find(r'\\') >= 0:
env = 'align'
else:
env = 'equation'
if not numbered:
env += '*'
return env
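# Illustrative examples (numbering left off, so the starred forms):
#   pick_math_environment(r'x = y') -> 'equation*'
#   pick_math_environment(r'a &= b \\ c &= d') -> 'align*'
#   pick_math_environment(r'x = y', numbered=True) -> 'equation'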
| agpl-3.0 |
raysguy/HTPC-Manager | libs/cherrypy/test/test_states.py | 20 | 18195 | import os
import signal
import socket
import sys
import time
import unittest
import warnings
import cherrypy
import cherrypy.process.servers
from cherrypy._cpcompat import BadStatusLine, ntob
from cherrypy.test import helper
engine = cherrypy.engine
thisdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
class Dependency:
def __init__(self, bus):
self.bus = bus
self.running = False
self.startcount = 0
self.gracecount = 0
self.threads = {}
def subscribe(self):
self.bus.subscribe('start', self.start)
self.bus.subscribe('stop', self.stop)
self.bus.subscribe('graceful', self.graceful)
self.bus.subscribe('start_thread', self.startthread)
self.bus.subscribe('stop_thread', self.stopthread)
def start(self):
self.running = True
self.startcount += 1
def stop(self):
self.running = False
def graceful(self):
self.gracecount += 1
def startthread(self, thread_id):
self.threads[thread_id] = None
def stopthread(self, thread_id):
del self.threads[thread_id]
db_connection = Dependency(engine)
def setup_server():
class Root:
def index(self):
return "Hello World"
index.exposed = True
def ctrlc(self):
raise KeyboardInterrupt()
ctrlc.exposed = True
def graceful(self):
engine.graceful()
return "app was (gracefully) restarted succesfully"
graceful.exposed = True
def block_explicit(self):
while True:
if cherrypy.response.timed_out:
cherrypy.response.timed_out = False
return "broken!"
time.sleep(0.01)
block_explicit.exposed = True
def block_implicit(self):
time.sleep(0.5)
return "response.timeout = %s" % cherrypy.response.timeout
block_implicit.exposed = True
cherrypy.tree.mount(Root())
cherrypy.config.update({
'environment': 'test_suite',
'engine.deadlock_poll_freq': 0.1,
})
db_connection.subscribe()
# ------------ Enough helpers. Time for real live test cases. ------------ #
class ServerStateTests(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def setUp(self):
cherrypy.server.socket_timeout = 0.1
self.do_gc_test = False
def test_0_NormalStateFlow(self):
engine.stop()
# Our db_connection should not be running
self.assertEqual(db_connection.running, False)
self.assertEqual(db_connection.startcount, 1)
self.assertEqual(len(db_connection.threads), 0)
# Test server start
engine.start()
self.assertEqual(engine.state, engine.states.STARTED)
host = cherrypy.server.socket_host
port = cherrypy.server.socket_port
self.assertRaises(IOError, cherrypy._cpserver.check_port, host, port)
# The db_connection should be running now
self.assertEqual(db_connection.running, True)
self.assertEqual(db_connection.startcount, 2)
self.assertEqual(len(db_connection.threads), 0)
self.getPage("/")
self.assertBody("Hello World")
self.assertEqual(len(db_connection.threads), 1)
# Test engine stop. This will also stop the HTTP server.
engine.stop()
self.assertEqual(engine.state, engine.states.STOPPED)
# Verify that our custom stop function was called
self.assertEqual(db_connection.running, False)
self.assertEqual(len(db_connection.threads), 0)
# Block the main thread now and verify that exit() works.
def exittest():
self.getPage("/")
self.assertBody("Hello World")
engine.exit()
cherrypy.server.start()
engine.start_with_callback(exittest)
engine.block()
self.assertEqual(engine.state, engine.states.EXITING)
def test_1_Restart(self):
cherrypy.server.start()
engine.start()
# The db_connection should be running now
self.assertEqual(db_connection.running, True)
grace = db_connection.gracecount
self.getPage("/")
self.assertBody("Hello World")
self.assertEqual(len(db_connection.threads), 1)
# Test server restart from this thread
engine.graceful()
self.assertEqual(engine.state, engine.states.STARTED)
self.getPage("/")
self.assertBody("Hello World")
self.assertEqual(db_connection.running, True)
self.assertEqual(db_connection.gracecount, grace + 1)
self.assertEqual(len(db_connection.threads), 1)
# Test server restart from inside a page handler
self.getPage("/graceful")
self.assertEqual(engine.state, engine.states.STARTED)
self.assertBody("app was (gracefully) restarted succesfully")
self.assertEqual(db_connection.running, True)
self.assertEqual(db_connection.gracecount, grace + 2)
# Since we are requesting synchronously, only one thread should have been used.
# Note that the "/graceful" request has been flushed.
self.assertEqual(len(db_connection.threads), 0)
engine.stop()
self.assertEqual(engine.state, engine.states.STOPPED)
self.assertEqual(db_connection.running, False)
self.assertEqual(len(db_connection.threads), 0)
def test_2_KeyboardInterrupt(self):
# Raise a keyboard interrupt in the HTTP server's main thread.
# We must start the server in this, the main thread
engine.start()
cherrypy.server.start()
self.persistent = True
try:
# Make the first request and assert there's no "Connection: close".
self.getPage("/")
self.assertStatus('200 OK')
self.assertBody("Hello World")
self.assertNoHeader("Connection")
cherrypy.server.httpserver.interrupt = KeyboardInterrupt
engine.block()
self.assertEqual(db_connection.running, False)
self.assertEqual(len(db_connection.threads), 0)
self.assertEqual(engine.state, engine.states.EXITING)
finally:
self.persistent = False
# Raise a keyboard interrupt in a page handler; on multithreaded
# servers, this should occur in one of the worker threads.
# This should raise a BadStatusLine error, since the worker
# thread will just die without writing a response.
engine.start()
cherrypy.server.start()
try:
self.getPage("/ctrlc")
except BadStatusLine:
pass
else:
print(self.body)
self.fail("AssertionError: BadStatusLine not raised")
engine.block()
self.assertEqual(db_connection.running, False)
self.assertEqual(len(db_connection.threads), 0)
def test_3_Deadlocks(self):
cherrypy.config.update({'response.timeout': 0.2})
engine.start()
cherrypy.server.start()
try:
self.assertNotEqual(engine.timeout_monitor.thread, None)
# Request a "normal" page.
self.assertEqual(engine.timeout_monitor.servings, [])
self.getPage("/")
self.assertBody("Hello World")
# request.close is called async.
while engine.timeout_monitor.servings:
sys.stdout.write(".")
time.sleep(0.01)
# Request a page that explicitly checks itself for deadlock.
# The deadlock_timeout should be 2 secs.
self.getPage("/block_explicit")
self.assertBody("broken!")
# Request a page that implicitly breaks deadlock.
# If we deadlock, we want to touch as little code as possible,
# so we won't even call handle_error, just bail ASAP.
self.getPage("/block_implicit")
self.assertStatus(500)
self.assertInBody("raise cherrypy.TimeoutError()")
finally:
engine.exit()
def test_4_Autoreload(self):
# If test_3 has not been executed, the server won't be stopped,
# so we'll have to do it.
if engine.state != engine.states.EXITING:
engine.exit()
# Start the demo script in a new process
p = helper.CPProcess(ssl=(self.scheme.lower() == 'https'))
p.write_conf(extra='test_case_name: "test_4_Autoreload"')
p.start(imports='cherrypy.test._test_states_demo')
try:
self.getPage("/start")
start = float(self.body)
# Give the autoreloader time to cache the file time.
time.sleep(2)
# Touch the file
os.utime(os.path.join(thisdir, "_test_states_demo.py"), None)
# Give the autoreloader time to re-exec the process
time.sleep(2)
host = cherrypy.server.socket_host
port = cherrypy.server.socket_port
cherrypy._cpserver.wait_for_occupied_port(host, port)
self.getPage("/start")
if not (float(self.body) > start):
raise AssertionError("start time %s not greater than %s" %
(float(self.body), start))
finally:
# Shut down the spawned process
self.getPage("/exit")
p.join()
def test_5_Start_Error(self):
# If test_3 has not been executed, the server won't be stopped,
# so we'll have to do it.
if engine.state != engine.states.EXITING:
engine.exit()
# If a process errors during start, it should stop the engine
# and exit with a non-zero exit code.
p = helper.CPProcess(ssl=(self.scheme.lower() == 'https'),
wait=True)
p.write_conf(
extra="""starterror: True
test_case_name: "test_5_Start_Error"
"""
)
p.start(imports='cherrypy.test._test_states_demo')
if p.exit_code == 0:
self.fail("Process failed to return nonzero exit code.")
class PluginTests(helper.CPWebCase):
def test_daemonize(self):
if os.name not in ['posix']:
return self.skip("skipped (not on posix) ")
self.HOST = '127.0.0.1'
self.PORT = 8081
# Spawn the process and wait, when this returns, the original process
# is finished. If it daemonized properly, we should still be able
# to access pages.
p = helper.CPProcess(ssl=(self.scheme.lower() == 'https'),
wait=True, daemonize=True,
socket_host='127.0.0.1',
socket_port=8081)
p.write_conf(
extra='test_case_name: "test_daemonize"')
p.start(imports='cherrypy.test._test_states_demo')
try:
# Just get the pid of the daemonization process.
self.getPage("/pid")
self.assertStatus(200)
page_pid = int(self.body)
self.assertEqual(page_pid, p.get_pid())
finally:
# Shut down the spawned process
self.getPage("/exit")
p.join()
# Wait until here to test the exit code because we want to ensure
# that we wait for the daemon to finish running before we fail.
if p.exit_code != 0:
self.fail("Daemonized parent process failed to exit cleanly.")
class SignalHandlingTests(helper.CPWebCase):
def test_SIGHUP_tty(self):
# When not daemonized, SIGHUP should shut down the server.
try:
from signal import SIGHUP
except ImportError:
return self.skip("skipped (no SIGHUP) ")
# Spawn the process.
p = helper.CPProcess(ssl=(self.scheme.lower() == 'https'))
p.write_conf(
extra='test_case_name: "test_SIGHUP_tty"')
p.start(imports='cherrypy.test._test_states_demo')
# Send a SIGHUP
os.kill(p.get_pid(), SIGHUP)
# This might hang if things aren't working right, but meh.
p.join()
def test_SIGHUP_daemonized(self):
# When daemonized, SIGHUP should restart the server.
try:
from signal import SIGHUP
except ImportError:
return self.skip("skipped (no SIGHUP) ")
if os.name not in ['posix']:
return self.skip("skipped (not on posix) ")
# Spawn the process and wait, when this returns, the original process
# is finished. If it daemonized properly, we should still be able
# to access pages.
p = helper.CPProcess(ssl=(self.scheme.lower() == 'https'),
wait=True, daemonize=True)
p.write_conf(
extra='test_case_name: "test_SIGHUP_daemonized"')
p.start(imports='cherrypy.test._test_states_demo')
pid = p.get_pid()
try:
# Send a SIGHUP
os.kill(pid, SIGHUP)
# Give the server some time to restart
time.sleep(2)
self.getPage("/pid")
self.assertStatus(200)
new_pid = int(self.body)
self.assertNotEqual(new_pid, pid)
finally:
# Shut down the spawned process
self.getPage("/exit")
p.join()
def _require_signal_and_kill(self, signal_name):
if not hasattr(signal, signal_name):
self.skip("skipped (no %(signal_name)s)" % vars())
if not hasattr(os, 'kill'):
self.skip("skipped (no os.kill)")
def test_SIGTERM(self):
"SIGTERM should shut down the server whether daemonized or not."
self._require_signal_and_kill('SIGTERM')
# Spawn a normal, undaemonized process.
p = helper.CPProcess(ssl=(self.scheme.lower() == 'https'))
p.write_conf(
extra='test_case_name: "test_SIGTERM"')
p.start(imports='cherrypy.test._test_states_demo')
# Send a SIGTERM
os.kill(p.get_pid(), signal.SIGTERM)
# This might hang if things aren't working right, but meh.
p.join()
if os.name in ['posix']:
# Spawn a daemonized process and test again.
p = helper.CPProcess(ssl=(self.scheme.lower() == 'https'),
wait=True, daemonize=True)
p.write_conf(
extra='test_case_name: "test_SIGTERM_2"')
p.start(imports='cherrypy.test._test_states_demo')
# Send a SIGTERM
os.kill(p.get_pid(), signal.SIGTERM)
# This might hang if things aren't working right, but meh.
p.join()
def test_signal_handler_unsubscribe(self):
self._require_signal_and_kill('SIGTERM')
# Although Windows has `os.kill` and SIGTERM is defined, the
# platform does not implement signals and sending SIGTERM
# will result in a forced termination of the process.
# Therefore, this test is not suitable for Windows.
if os.name == 'nt':
self.skip("SIGTERM not available")
# Spawn a normal, undaemonized process.
p = helper.CPProcess(ssl=(self.scheme.lower() == 'https'))
p.write_conf(
extra="""unsubsig: True
test_case_name: "test_signal_handler_unsubscribe"
""")
p.start(imports='cherrypy.test._test_states_demo')
# Ask the process to quit
os.kill(p.get_pid(), signal.SIGTERM)
# This might hang if things aren't working right, but meh.
p.join()
# Assert the old handler ran.
target_line = open(p.error_log, 'rb').readlines()[-10]
if not ntob("I am an old SIGTERM handler.") in target_line:
self.fail("Old SIGTERM handler did not run.\n%r" % target_line)
class WaitTests(unittest.TestCase):
def test_wait_for_occupied_port_INADDR_ANY(self):
"""
Wait on INADDR_ANY should not raise IOError
In cases where the loopback interface does not exist, CherryPy cannot
effectively determine if a port binding to INADDR_ANY was effected.
In this situation, CherryPy should assume that it failed to detect
the binding (not that the binding failed) and only warn that it could
not verify it.
"""
# At such a time that CherryPy can reliably determine one or more
# viable IP addresses of the host, this test may be removed.
# Simulate the behavior we observe when no loopback interface is
# present by: finding a port that's not occupied, then wait on it.
free_port = self.find_free_port()
servers = cherrypy.process.servers
def with_shorter_timeouts(func):
"""
A context where occupied_port_timeout is much smaller to speed
test runs.
"""
# When we have Python 2.5, simplify using the with_statement.
orig_timeout = servers.occupied_port_timeout
servers.occupied_port_timeout = .07
try:
func()
finally:
servers.occupied_port_timeout = orig_timeout
def do_waiting():
# Wait on the free port that's unbound
with warnings.catch_warnings(record=True) as w:
servers.wait_for_occupied_port('0.0.0.0', free_port)
self.assertEqual(len(w), 1)
self.assertTrue(isinstance(w[0], warnings.WarningMessage))
self.assertTrue(
'Unable to verify that the server is bound on ' in str(w[0]))
# The wait should still raise an IO error if INADDR_ANY was
# not supplied.
self.assertRaises(IOError, servers.wait_for_occupied_port,
'127.0.0.1', free_port)
with_shorter_timeouts(do_waiting)
def find_free_port(self):
"Find a free port by binding to port 0 then unbinding."
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
free_port = sock.getsockname()[1]
sock.close()
return free_port
| mit |
fengbaicanhe/intellij-community | python/helpers/pydev/pydevd_save_locals.py | 53 | 1523 | """
Utility for saving locals.
"""
import sys
import pydevd_vars
def is_save_locals_available():
try:
if '__pypy__' in sys.builtin_module_names:
import __pypy__
save_locals = __pypy__.locals_to_fast
return True
except:
pass
try:
import ctypes
except:
return False #Not all Python versions have it
try:
func = ctypes.pythonapi.PyFrame_LocalsToFast
except:
return False
return True
def save_locals(frame):
"""
Copy values from frame.f_locals into the fast stack slots in the given frame.
Note: the 'save_locals' branch had a different approach wrapping the frame (much more code, but it gives ideas
on how to save things partially, not the 'whole' locals).
"""
if not isinstance(frame, pydevd_vars.frame_type):
# Fix exception when changing Django variable (receiving DjangoTemplateFrame)
return
try:
if '__pypy__' in sys.builtin_module_names:
import __pypy__
save_locals = __pypy__.locals_to_fast
save_locals(frame)
return
except:
pass
try:
import ctypes
except:
return #Not all Python versions have it
try:
func = ctypes.pythonapi.PyFrame_LocalsToFast
except:
return
# parameter 0: don't null out fast slots that are missing from frame.f_locals (which seems right in the debugger context).
func(ctypes.py_object(frame), ctypes.c_int(0))
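# Usage sketch (illustrative; assumes CPython and a live frame object):
#   frame = sys._getframe(1)
#   frame.f_locals['x'] = 42   # mutate the locals mapping
#   save_locals(frame)         # push the change into the fast slots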
| apache-2.0 |
srickardti/openthread | tests/scripts/thread-cert/Cert_5_1_06_RemoveRouterId.py | 1 | 4313 | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import command
import mle
import thread_cert
from command import CheckType
LEADER = 1
ROUTER1 = 2
class Cert_5_1_06_RemoveRouterId(thread_cert.TestCase):
TOPOLOGY = {
LEADER: {
'mode': 'rdn',
'panid': 0xface,
'allowlist': [ROUTER1]
},
ROUTER1: {
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1,
'allowlist': [LEADER]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
rloc16 = self.nodes[ROUTER1].get_addr16()
for addr in self.nodes[ROUTER1].get_addrs():
self.assertTrue(self.nodes[LEADER].ping(addr))
self.nodes[LEADER].release_router_id(rloc16 >> 10)
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
leader_messages = self.simulator.get_messages_sent_by(LEADER)
router1_messages = self.simulator.get_messages_sent_by(ROUTER1)
# 1 - All
leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg = router1_messages.next_coap_message("0.02")
msg.assertCoapMessageRequestUriPath("/a/as")
leader_messages.next_coap_message("2.04")
# 2 - N/A
# 3 - Router1
msg = router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
command.check_parent_request(msg, is_first_request=True)
msg = router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST, sent_to_node=self.nodes[LEADER])
command.check_child_id_request(
msg,
tlv_request=CheckType.CONTAIN,
mle_frame_counter=CheckType.OPTIONAL,
address_registration=CheckType.NOT_CONTAIN,
active_timestamp=CheckType.OPTIONAL,
pending_timestamp=CheckType.OPTIONAL,
)
msg = router1_messages.next_coap_message(code="0.02")
command.check_address_solicit(msg, was_router=True)
# 4 - Router1
for addr in self.nodes[ROUTER1].get_addrs():
self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
linktlh/Toontown-journey | toontown/makeatoon/BodyShop.py | 4 | 19415 | from pandac.PandaModules import *
from toontown.toon import ToonDNA
from direct.fsm import StateData
from direct.gui.DirectGui import *
from MakeAToonGlobals import *
import random
from toontown.toonbase import TTLocalizer
from direct.directnotify import DirectNotifyGlobal
from toontown.toontowngui import TeaserPanel
import ShuffleButton
class BodyShop(StateData.StateData):
notify = DirectNotifyGlobal.directNotify.newCategory('BodyShop')
def __init__(self, doneEvent):
StateData.StateData.__init__(self, doneEvent)
self.toon = None
self.torsoChoice = 0
self.legChoice = 0
self.headChoice = 0
self.speciesChoice = 0
return
def enter(self, toon, shopsVisited = []):
base.disableMouse()
self.toon = toon
self.dna = self.toon.getStyle()
gender = self.toon.style.getGender()
self.speciesStart = self.getSpeciesStart()
self.speciesChoice = self.speciesStart
self.headStart = 0
self.headChoice = ToonDNA.toonHeadTypes.index(self.dna.head) - ToonDNA.getHeadStartIndex(self.species)
self.torsoStart = 0
self.torsoChoice = ToonDNA.toonTorsoTypes.index(self.dna.torso) % 3
self.legStart = 0
self.legChoice = ToonDNA.toonLegTypes.index(self.dna.legs)
if CLOTHESSHOP in shopsVisited:
self.clothesPicked = 1
else:
self.clothesPicked = 0
self.clothesPicked = 1
if gender == 'm' or ToonDNA.GirlBottoms[self.dna.botTex][1] == ToonDNA.SHORTS:
torsoStyle = 's'
torsoPool = ToonDNA.toonTorsoTypes[:3]
else:
torsoStyle = 'd'
torsoPool = ToonDNA.toonTorsoTypes[3:6]
self.__swapSpecies(0)
self.__swapHead(0)
self.__swapTorso(0)
self.__swapLegs(0)
choicePool = [ToonDNA.toonHeadTypes, torsoPool, ToonDNA.toonLegTypes]
self.shuffleButton.setChoicePool(choicePool)
self.accept(self.shuffleFetchMsg, self.changeBody)
self.acceptOnce('last', self.__handleBackward)
self.accept('next', self.__handleForward)
self.acceptOnce('MAT-newToonCreated', self.shuffleButton.cleanHistory)
self.restrictHeadType(self.dna.head)
def getSpeciesStart(self):
for species in ToonDNA.toonSpeciesTypes:
if species == self.dna.head[0]:
self.species = species
return ToonDNA.toonSpeciesTypes.index(species)
def showButtons(self):
self.parentFrame.show()
def hideButtons(self):
self.parentFrame.hide()
self.memberButton.hide()
def exit(self):
try:
del self.toon
except:
self.notify.warning('BodyShop: toon not found')
self.hideButtons()
self.ignore('last')
self.ignore('next')
self.ignore('enter')
self.ignore(self.shuffleFetchMsg)
def load(self):
self.gui = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_mainGui')
guiRArrowUp = self.gui.find('**/tt_t_gui_mat_arrowUp')
guiRArrowDown = self.gui.find('**/tt_t_gui_mat_arrowDown')
guiRArrowRollover = self.gui.find('**/tt_t_gui_mat_arrowUp')
guiRArrowDisabled = self.gui.find('**/tt_t_gui_mat_arrowDisabled')
shuffleFrame = self.gui.find('**/tt_t_gui_mat_shuffleFrame')
shuffleArrowUp = self.gui.find('**/tt_t_gui_mat_shuffleArrowUp')
shuffleArrowDown = self.gui.find('**/tt_t_gui_mat_shuffleArrowDown')
shuffleArrowRollover = self.gui.find('**/tt_t_gui_mat_shuffleArrowUp')
shuffleArrowDisabled = self.gui.find('**/tt_t_gui_mat_shuffleArrowDisabled')
self.upsellModel = loader.loadModel('phase_3/models/gui/tt_m_gui_ups_mainGui')
upsellTex = self.upsellModel.find('**/tt_t_gui_ups_banner')
self.parentFrame = DirectFrame(relief=DGG.RAISED, pos=(0.98, 0, 0.416), frameColor=(1, 0, 0, 0))
self.parentFrame.setPos(-0.36, 0, -0.5)
self.parentFrame.reparentTo(base.a2dTopRight)
self.speciesFrame = DirectFrame(parent=self.parentFrame, image=shuffleFrame, image_scale=halfButtonInvertScale, relief=None, pos=(0, 0, -0.073), hpr=(0, 0, 0), scale=1.3, frameColor=(1, 1, 1, 1), text='Species', text_scale=0.0625, text_pos=(-0.001, -0.015), text_fg=(1, 1, 1, 1))
self.speciesLButton = DirectButton(parent=self.speciesFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonScale, image1_scale=halfButtonHoverScale, image2_scale=halfButtonHoverScale, pos=(-0.2, 0, 0), command=self.__swapSpecies, extraArgs=[-1])
self.speciesRButton = DirectButton(parent=self.speciesFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonInvertScale, image1_scale=halfButtonInvertHoverScale, image2_scale=halfButtonInvertHoverScale, pos=(0.2, 0, 0), command=self.__swapSpecies, extraArgs=[1])
self.headFrame = DirectFrame(parent=self.parentFrame, image=shuffleFrame, image_scale=halfButtonInvertScale, relief=None, pos=(0, 0, -0.3), hpr=(0, 0, 2), scale=0.9, frameColor=(1, 1, 1, 1), text=TTLocalizer.BodyShopHead, text_scale=0.0625, text_pos=(-0.001, -0.015), text_fg=(1, 1, 1, 1))
self.headLButton = DirectButton(parent=self.headFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonScale, image1_scale=halfButtonHoverScale, image2_scale=halfButtonHoverScale, pos=(-0.2, 0, 0), command=self.__swapHead, extraArgs=[-1])
self.headRButton = DirectButton(parent=self.headFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonInvertScale, image1_scale=halfButtonInvertHoverScale, image2_scale=halfButtonInvertHoverScale, pos=(0.2, 0, 0), command=self.__swapHead, extraArgs=[1])
self.bodyFrame = DirectFrame(parent=self.parentFrame, image=shuffleFrame, image_scale=halfButtonScale, relief=None, pos=(0, 0, -0.5), hpr=(0, 0, -2), scale=0.9, frameColor=(1, 1, 1, 1), text=TTLocalizer.BodyShopBody, text_scale=0.0625, text_pos=(-0.001, -0.015), text_fg=(1, 1, 1, 1))
self.torsoLButton = DirectButton(parent=self.bodyFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonScale, image1_scale=halfButtonHoverScale, image2_scale=halfButtonHoverScale, pos=(-0.2, 0, 0), command=self.__swapTorso, extraArgs=[-1])
self.torsoRButton = DirectButton(parent=self.bodyFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonInvertScale, image1_scale=halfButtonInvertHoverScale, image2_scale=halfButtonInvertHoverScale, pos=(0.2, 0, 0), command=self.__swapTorso, extraArgs=[1])
self.legsFrame = DirectFrame(parent=self.parentFrame, image=shuffleFrame, image_scale=halfButtonInvertScale, relief=None, pos=(0, 0, -0.7), hpr=(0, 0, 3), scale=0.9, frameColor=(1, 1, 1, 1), text=TTLocalizer.BodyShopLegs, text_scale=0.0625, text_pos=(-0.001, -0.015), text_fg=(1, 1, 1, 1))
self.legLButton = DirectButton(parent=self.legsFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonScale, image1_scale=halfButtonHoverScale, image2_scale=halfButtonHoverScale, pos=(-0.2, 0, 0), command=self.__swapLegs, extraArgs=[-1])
self.legRButton = DirectButton(parent=self.legsFrame, relief=None, image=(shuffleArrowUp,
shuffleArrowDown,
shuffleArrowRollover,
shuffleArrowDisabled), image_scale=halfButtonInvertScale, image1_scale=halfButtonInvertHoverScale, image2_scale=halfButtonInvertHoverScale, pos=(0.2, 0, 0), command=self.__swapLegs, extraArgs=[1])
self.memberButton = DirectButton(relief=None, image=(upsellTex,
upsellTex,
upsellTex,
upsellTex), image_scale=halfButtonScale, image1_scale=halfButtonHoverScale, image2_scale=halfButtonHoverScale, scale=0.9, pos=(0, 0, -0.84), command=self.__restrictForward)
self.parentFrame.hide()
self.memberButton.hide()
self.shuffleFetchMsg = 'BodyShopShuffle'
self.shuffleButton = ShuffleButton.ShuffleButton(self, self.shuffleFetchMsg)
return
def unload(self):
self.gui.removeNode()
del self.gui
self.upsellModel.removeNode()
del self.upsellModel
self.parentFrame.destroy()
self.speciesFrame.destroy()
self.headFrame.destroy()
self.bodyFrame.destroy()
self.legsFrame.destroy()
self.speciesLButton.destroy()
self.speciesRButton.destroy()
self.headLButton.destroy()
self.headRButton.destroy()
self.torsoLButton.destroy()
self.torsoRButton.destroy()
self.legLButton.destroy()
self.legRButton.destroy()
self.memberButton.destroy()
del self.parentFrame
del self.speciesFrame
del self.headFrame
del self.bodyFrame
del self.legsFrame
del self.speciesLButton
del self.speciesRButton
del self.headLButton
del self.headRButton
del self.torsoLButton
del self.torsoRButton
del self.legLButton
del self.legRButton
del self.memberButton
self.shuffleButton.unload()
self.ignore('MAT-newToonCreated')
def __swapTorso(self, offset):
gender = self.toon.style.getGender()
if not self.clothesPicked:
length = len(ToonDNA.toonTorsoTypes[6:])
torsoOffset = 6
elif gender == 'm':
length = len(ToonDNA.toonTorsoTypes[:3])
torsoOffset = 0
if self.dna.armColor not in ToonDNA.defaultBoyColorList:
self.dna.armColor = ToonDNA.defaultBoyColorList[0]
if self.dna.legColor not in ToonDNA.defaultBoyColorList:
self.dna.legColor = ToonDNA.defaultBoyColorList[0]
if self.dna.headColor not in ToonDNA.defaultBoyColorList:
self.dna.headColor = ToonDNA.defaultBoyColorList[0]
if self.toon.style.topTex not in ToonDNA.MakeAToonBoyShirts:
randomShirt = ToonDNA.getRandomTop(gender, ToonDNA.MAKE_A_TOON)
shirtTex, shirtColor, sleeveTex, sleeveColor = randomShirt
self.toon.style.topTex = shirtTex
self.toon.style.topTexColor = shirtColor
self.toon.style.sleeveTex = sleeveTex
self.toon.style.sleeveTexColor = sleeveColor
if self.toon.style.botTex not in ToonDNA.MakeAToonBoyBottoms:
botTex, botTexColor = ToonDNA.getRandomBottom(gender, ToonDNA.MAKE_A_TOON)
self.toon.style.botTex = botTex
self.toon.style.botTexColor = botTexColor
else:
length = len(ToonDNA.toonTorsoTypes[3:6])
if self.toon.style.torso[1] == 'd':
torsoOffset = 3
else:
torsoOffset = 0
if self.dna.armColor not in ToonDNA.defaultGirlColorList:
self.dna.armColor = ToonDNA.defaultGirlColorList[0]
if self.dna.legColor not in ToonDNA.defaultGirlColorList:
self.dna.legColor = ToonDNA.defaultGirlColorList[0]
if self.dna.headColor not in ToonDNA.defaultGirlColorList:
self.dna.headColor = ToonDNA.defaultGirlColorList[0]
if self.toon.style.topTex not in ToonDNA.MakeAToonGirlShirts:
randomShirt = ToonDNA.getRandomTop(gender, ToonDNA.MAKE_A_TOON)
shirtTex, shirtColor, sleeveTex, sleeveColor = randomShirt
self.toon.style.topTex = shirtTex
self.toon.style.topTexColor = shirtColor
self.toon.style.sleeveTex = sleeveTex
self.toon.style.sleeveTexColor = sleeveColor
if self.toon.style.botTex not in ToonDNA.MakeAToonGirlBottoms:
if self.toon.style.torso[1] == 'd':
botTex, botTexColor = ToonDNA.getRandomBottom(gender, ToonDNA.MAKE_A_TOON, girlBottomType=ToonDNA.SKIRT)
self.toon.style.botTex = botTex
self.toon.style.botTexColor = botTexColor
torsoOffset = 3
else:
botTex, botTexColor = ToonDNA.getRandomBottom(gender, ToonDNA.MAKE_A_TOON, girlBottomType=ToonDNA.SHORTS)
self.toon.style.botTex = botTex
self.toon.style.botTexColor = botTexColor
torsoOffset = 0
self.torsoChoice = (self.torsoChoice + offset) % length
self.__updateScrollButtons(self.torsoChoice, length, self.torsoStart, self.torsoLButton, self.torsoRButton)
torso = ToonDNA.toonTorsoTypes[torsoOffset + self.torsoChoice]
self.dna.torso = torso
self.toon.swapToonTorso(torso)
self.toon.loop('neutral', 0)
self.toon.swapToonColor(self.dna)
def __swapLegs(self, offset):
length = len(ToonDNA.toonLegTypes)
self.legChoice = (self.legChoice + offset) % length
self.notify.debug('self.legChoice=%d, length=%d, self.legStart=%d' % (self.legChoice, length, self.legStart))
self.__updateScrollButtons(self.legChoice, length, self.legStart, self.legLButton, self.legRButton)
newLeg = ToonDNA.toonLegTypes[self.legChoice]
self.dna.legs = newLeg
self.toon.swapToonLegs(newLeg)
self.toon.loop('neutral', 0)
self.toon.swapToonColor(self.dna)
def __swapHead(self, offset):
self.headList = ToonDNA.getHeadList(self.species)
length = len(self.headList)
self.headChoice = (self.headChoice + offset) % length
self.__updateHead()
def __swapSpecies(self, offset):
length = len(ToonDNA.toonSpeciesTypes)
self.speciesChoice = (self.speciesChoice + offset) % length
self.__updateScrollButtons(self.speciesChoice, length, self.speciesStart, self.speciesLButton, self.speciesRButton)
self.species = ToonDNA.toonSpeciesTypes[self.speciesChoice]
self.headList = ToonDNA.getHeadList(self.species)
self.__changeSpeciesName(self.species)
maxHeadChoice = len(self.headList) - 1
if self.headChoice > maxHeadChoice:
self.headChoice = maxHeadChoice
self.__updateHead()
def __updateHead(self):
self.__updateScrollButtons(self.headChoice, len(self.headList), self.headStart, self.headLButton, self.headRButton)
headIndex = ToonDNA.getHeadStartIndex(self.species) + self.headChoice
newHead = ToonDNA.toonHeadTypes[headIndex]
self.dna.head = newHead
self.toon.swapToonHead(newHead)
self.toon.loop('neutral', 0)
self.toon.swapToonColor(self.dna)
self.restrictHeadType(newHead)
def __updateScrollButtons(self, choice, length, start, lButton, rButton):
if choice == (start - 1) % length:
rButton['state'] = DGG.DISABLED
elif choice != (start - 1) % length:
rButton['state'] = DGG.NORMAL
if choice == start % length:
lButton['state'] = DGG.DISABLED
elif choice != start % length:
lButton['state'] = DGG.NORMAL
if lButton['state'] == DGG.DISABLED and rButton['state'] == DGG.DISABLED:
self.notify.info('Both buttons got disabled! Doing fallback code. choice%d, length=%d, start=%d, lButton=%s, rButton=%s' % (choice,
length,
start,
lButton,
rButton))
if choice == start % length:
lButton['state'] = DGG.DISABLED
rButton['state'] = DGG.NORMAL
elif choice == (start - 1) % length:
lButton['state'] = DGG.NORMAL
rButton['state'] = DGG.DISABLED
else:
lButton['state'] = DGG.NORMAL
rButton['state'] = DGG.NORMAL
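# Illustrative: with length=5 and start=0, choice 0 disables the left
# arrow and choice 4 ((start - 1) % length) disables the right arrow,
# pinning scrolling to one full pass over the list.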
def __handleForward(self):
self.doneStatus = 'next'
messenger.send(self.doneEvent)
def __handleBackward(self):
self.doneStatus = 'last'
messenger.send(self.doneEvent)
def restrictHeadType(self, head):
if not base.cr.isPaid():
if head[0] in ('h', 'p', 'b'):
self.accept('next', self.__restrictForward)
else:
self.accept('next', self.__handleForward)
def __restrictForward(self):
TeaserPanel.TeaserPanel(pageName='species')
def changeBody(self):
newChoice = self.shuffleButton.getCurrChoice()
newHead = newChoice[0]
newSpeciesIndex = ToonDNA.toonSpeciesTypes.index(ToonDNA.getSpecies(newHead))
newHeadIndex = ToonDNA.toonHeadTypes.index(newHead) - ToonDNA.getHeadStartIndex(ToonDNA.getSpecies(newHead))
newTorsoIndex = ToonDNA.toonTorsoTypes.index(newChoice[1])
newLegsIndex = ToonDNA.toonLegTypes.index(newChoice[2])
oldHead = self.toon.style.head
oldSpeciesIndex = ToonDNA.toonSpeciesTypes.index(ToonDNA.getSpecies(oldHead))
oldHeadIndex = ToonDNA.toonHeadTypes.index(oldHead) - ToonDNA.getHeadStartIndex(ToonDNA.getSpecies(oldHead))
oldTorsoIndex = ToonDNA.toonTorsoTypes.index(self.toon.style.torso)
oldLegsIndex = ToonDNA.toonLegTypes.index(self.toon.style.legs)
self.__swapSpecies(newSpeciesIndex - oldSpeciesIndex)
self.__swapHead(newHeadIndex - oldHeadIndex)
self.__swapTorso(newTorsoIndex - oldTorsoIndex)
self.__swapLegs(newLegsIndex - oldLegsIndex)
def getCurrToonSetting(self):
return [self.toon.style.head, self.toon.style.torso, self.toon.style.legs]
def __changeSpeciesName(self, species):
if species == 'd':
self.speciesFrame['text'] = TTLocalizer.AnimalToSpecies['dog']
self.memberButton.hide()
elif species == 'c':
self.speciesFrame['text'] = TTLocalizer.AnimalToSpecies['cat']
self.memberButton.hide()
elif species == 'm':
self.speciesFrame['text'] = TTLocalizer.AnimalToSpecies['mouse']
self.memberButton.hide()
elif species == 'h':
self.speciesFrame['text'] = TTLocalizer.AnimalToSpecies['horse']
self.memberButton.show()
elif species == 'r':
self.speciesFrame['text'] = TTLocalizer.AnimalToSpecies['rabbit']
self.memberButton.hide()
elif species == 'f':
self.speciesFrame['text'] = TTLocalizer.AnimalToSpecies['duck']
self.memberButton.hide()
elif species == 'p':
self.speciesFrame['text'] = TTLocalizer.AnimalToSpecies['monkey']
self.memberButton.show()
elif species == 'b':
self.speciesFrame['text'] = TTLocalizer.AnimalToSpecies['bear']
self.memberButton.show()
elif species == 's':
self.speciesFrame['text'] = TTLocalizer.AnimalToSpecies['pig']
self.memberButton.hide()
if base.cr.isPaid():
self.memberButton.hide()
| apache-2.0 |
sbellem/django | django/contrib/redirects/migrations/0001_initial.py | 108 | 1417 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Redirect',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('site', models.ForeignKey(
to='sites.Site',
to_field='id',
on_delete=models.CASCADE,
verbose_name='site',
)),
('old_path', models.CharField(help_text="This should be an absolute path, excluding the domain name. Example: '/events/search/'.", max_length=200, verbose_name='redirect from', db_index=True)),
('new_path', models.CharField(help_text="This can be either an absolute path (as above) or a full URL starting with 'http://'.", max_length=200, verbose_name='redirect to', blank=True)),
],
options={
'ordering': ('old_path',),
'unique_together': set([('site', 'old_path')]),
'db_table': 'django_redirect',
'verbose_name': 'redirect',
'verbose_name_plural': 'redirects',
},
bases=(models.Model,),
),
]
| bsd-3-clause |
Juniper/nova | nova/virt/powervm/driver.py | 2 | 14038 | # Copyright 2014, 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Connection to PowerVM hypervisor through NovaLink."""
from oslo_log import log as logging
from oslo_utils import excutils
from pypowervm import adapter as pvm_apt
from pypowervm import const as pvm_const
from pypowervm import exceptions as pvm_exc
from pypowervm.helpers import log_helper as log_hlp
from pypowervm.helpers import vios_busy as vio_hlp
from pypowervm.tasks import partition as pvm_par
from pypowervm.tasks import storage as pvm_stor
from pypowervm.tasks import vterm as pvm_vterm
from pypowervm.wrappers import managed_system as pvm_ms
import six
from taskflow.patterns import linear_flow as tf_lf
from nova import conf as cfg
from nova.console import type as console_type
from nova import exception as exc
from nova import image
from nova.virt import driver
from nova.virt.powervm.disk import ssp
from nova.virt.powervm import host as pvm_host
from nova.virt.powervm.tasks import base as tf_base
from nova.virt.powervm.tasks import storage as tf_stg
from nova.virt.powervm.tasks import vm as tf_vm
from nova.virt.powervm import vm
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PowerVMDriver(driver.ComputeDriver):
"""PowerVM NovaLink Implementation of Compute Driver.
https://wiki.openstack.org/wiki/PowerVM
"""
def __init__(self, virtapi):
super(PowerVMDriver, self).__init__(virtapi)
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function.
Includes catching up with currently running VMs on the given host.
"""
# Build the adapter. May need to attempt the connection multiple times
# in case the PowerVM management API service is starting.
# TODO(efried): Implement async compute service enable/disable like
# I73a34eb6e0ca32d03e54d12a5e066b2ed4f19a61
self.adapter = pvm_apt.Adapter(
pvm_apt.Session(conn_tries=60),
helpers=[log_hlp.log_helper, vio_hlp.vios_busy_retry_helper])
# Make sure the Virtual I/O Server(s) are available.
pvm_par.validate_vios_ready(self.adapter)
self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
# Do a scrub of the I/O plane to make sure the system is in good shape
LOG.info("Clearing stale I/O connections on driver init.")
pvm_stor.ComprehensiveScrub(self.adapter).execute()
# Initialize the disk adapter
# TODO(efried): Other disk adapters (localdisk), by conf selection.
self.disk_dvr = ssp.SSPDiskAdapter(self.adapter,
self.host_wrapper.uuid)
self.image_api = image.API()
LOG.info("The PowerVM compute driver has been initialized.")
@staticmethod
def _log_operation(op, instance):
"""Log entry point of driver operations."""
LOG.info('Operation: %(op)s. Virtual machine display name: '
'%(display_name)s, name: %(name)s',
{'op': op, 'display_name': instance.display_name,
'name': instance.name}, instance=instance)
def get_info(self, instance):
"""Get the current status of an instance.
:param instance: nova.objects.instance.Instance object
:returns: An InstanceInfo object containing:
:state: the running state, one of the power_state codes
:max_mem_kb: (int) the maximum memory in KBytes allowed
:mem_kb: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time_ns: (int) the CPU time used in nanoseconds
:id: a unique ID for the instance
"""
return vm.InstanceInfo(self.adapter, instance)
def list_instances(self):
"""Return the names of all the instances known to the virt host.
:return: VM Names as a list.
"""
return vm.get_lpar_names(self.adapter)
def get_available_nodes(self, refresh=False):
"""Returns nodenames of all nodes managed by the compute service.
This method is for multi compute-nodes support. If a driver supports
multi compute-nodes, this method returns a list of nodenames managed
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
return [CONF.host]
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and as part of a
periodic task.
:param nodename: Node from which the caller wants to get resources.
A driver that manages only one node can safely ignore
this.
:return: Dictionary describing resources.
"""
# TODO(efried): Switch to get_inventory, per blueprint
# custom-resource-classes-pike
# Do this here so it refreshes each time this method is called.
self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
# Get host information
data = pvm_host.build_host_resource_from_ms(self.host_wrapper)
# Add the disk information
data["local_gb"] = self.disk_dvr.capacity
data["local_gb_used"] = self.disk_dvr.capacity_used
return data
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: nova.objects.instance.Instance
This function should use the data there to guide
the creation of the new instance.
:param nova.objects.ImageMeta image_meta:
The metadata of the image of the instance.
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param network_info: instance network information
:param block_device_info: Information about block devices to be
attached to the instance.
"""
self._log_operation('spawn', instance)
# Define the flow
flow_spawn = tf_lf.Flow("spawn")
# This FeedTask accumulates VIOS storage connection operations to be
# run in parallel. Include both SCSI and fibre channel mappings for
# the scrubber.
stg_ftsk = pvm_par.build_active_vio_feed_task(
self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
flow_spawn.add(tf_vm.Create(
self.adapter, self.host_wrapper, instance, stg_ftsk))
# TODO(thorst, efried) Plug the VIFs
# Create the boot image.
flow_spawn.add(tf_stg.CreateDiskForImg(
self.disk_dvr, context, instance, image_meta))
# Connects up the disk to the LPAR
flow_spawn.add(tf_stg.AttachDisk(
self.disk_dvr, instance, stg_ftsk=stg_ftsk))
# TODO(thorst, efried) Add the config drive
# Add the transaction manager flow at the end of the 'I/O
# connection' tasks. This will run all the connections in parallel.
flow_spawn.add(stg_ftsk)
# Last step is to power on the system.
flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))
# Run the flow.
tf_base.run(flow_spawn, instance=instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy the specified instance from the Hypervisor.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param network_info: instance network information
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
"""
# TODO(thorst, efried) Add resize checks for destroy
self._log_operation('destroy', instance)
def _setup_flow_and_run():
# Define the flow
flow = tf_lf.Flow("destroy")
# Power Off the LPAR. If its disks are about to be deleted, issue a
# hard shutdown.
flow.add(tf_vm.PowerOff(self.adapter, instance,
force_immediate=destroy_disks))
# TODO(thorst, efried) Add unplug vifs task
# TODO(thorst, efried) Add config drive tasks
# TODO(thorst, efried) Add volume disconnect tasks
# Detach the disk storage adapters
flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))
# Delete the storage disks
if destroy_disks:
flow.add(tf_stg.DeleteDisk(self.disk_dvr))
# TODO(thorst, efried) Add LPAR id based scsi map clean up task
flow.add(tf_vm.Delete(self.adapter, instance))
# Build the engine & run!
tf_base.run(flow, instance=instance)
try:
_setup_flow_and_run()
except exc.InstanceNotFound:
LOG.debug('VM was not found during destroy operation.',
instance=instance)
return
except pvm_exc.Error as e:
LOG.exception("PowerVM error during destroy.", instance=instance)
# Convert to a Nova exception
raise exc.InstanceTerminationFailure(reason=six.text_type(e))
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
:param timeout: time to wait for GuestOS to shutdown
:param retry_interval: How often to signal guest while
waiting for it to shutdown
"""
self._log_operation('power_off', instance)
force_immediate = (timeout == 0)
timeout = timeout or None
vm.power_off(self.adapter, instance, force_immediate=force_immediate,
timeout=timeout)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance.
:param instance: nova.objects.instance.Instance
"""
self._log_operation('power_on', instance)
vm.power_on(self.adapter, instance)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
After this is called successfully, the instance's state
goes back to power_state.RUNNING. The virtualization
platform should ensure that the reboot action has completed
successfully even in cases in which the underlying domain/vm
is paused or halted/stopped.
:param instance: nova.objects.instance.Instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
:param block_device_info: Info pertaining to attached volumes
:param bad_volumes_callback: Function to handle any bad volumes
encountered
"""
self._log_operation(reboot_type + ' reboot', instance)
vm.reboot(self.adapter, instance, reboot_type == 'HARD')
# pypowervm exceptions are sufficient to indicate real failure.
# Otherwise, pypowervm thinks the instance is up.
def get_vnc_console(self, context, instance):
"""Get connection info for a vnc console.
:param context: security context
:param instance: nova.objects.instance.Instance
:return: An instance of console.type.ConsoleVNC
"""
self._log_operation('get_vnc_console', instance)
lpar_uuid = vm.get_pvm_uuid(instance)
# Build the connection to the VNC.
host = CONF.vnc.vncserver_proxyclient_address
# TODO(thorst, efried) Add the x509 certificate support when it lands
try:
# Open up a remote vterm
port = pvm_vterm.open_remotable_vnc_vterm(
self.adapter, lpar_uuid, host, vnc_path=lpar_uuid)
# Note that the VNC viewer will wrap the internal_access_path with
# the HTTP content.
return console_type.ConsoleVNC(host=host, port=port,
internal_access_path=lpar_uuid)
except pvm_exc.HttpError as e:
with excutils.save_and_reraise_exception(logger=LOG) as sare:
# If the LPAR was not found, raise a more descriptive error
if e.response.status == 404:
sare.reraise = False
raise exc.InstanceNotFound(instance_id=instance.uuid)
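# Illustrative sketch (not part of the original driver): the TaskFlow pattern
# that spawn() and destroy() rely on, reduced to a standalone flow. PrintTask
# is hypothetical; the driver's real tasks live in nova.virt.powervm.tasks.*.
def _example_taskflow():
    import taskflow.engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class PrintTask(task.Task):
        def execute(self):
            print('running %s' % self.name)

    flow = linear_flow.Flow('example')
    # Tasks added to a linear flow run strictly in order; if one fails,
    # already-completed tasks are reverted in reverse order.
    flow.add(PrintTask('create'), PrintTask('attach_disk'), PrintTask('power_on'))
    taskflow.engines.run(flow)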
| apache-2.0 |
zaccoz/odoo | addons/website_blog/tests/common.py | 279 | 1476 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestWebsiteBlogCommon(common.TransactionCase):
def setUp(self):
super(TestWebsiteBlogCommon, self).setUp()
Users = self.env['res.users']
group_blog_manager_id = self.ref('base.group_document_user')
group_employee_id = self.ref('base.group_user')
group_public_id = self.ref('base.group_public')
self.user_employee = Users.with_context({'no_reset_password': True}).create({
'name': 'Armande Employee',
'login': 'armande',
'alias_name': 'armande',
'email': 'armande.employee@example.com',
'notify_email': 'none',
'groups_id': [(6, 0, [group_employee_id])]
})
self.user_blogmanager = Users.with_context({'no_reset_password': True}).create({
'name': 'Bastien BlogManager',
'login': 'bastien',
'alias_name': 'bastien',
'email': 'bastien.blogmanager@example.com',
'notify_email': 'none',
'groups_id': [(6, 0, [group_blog_manager_id, group_employee_id])]
})
self.user_public = Users.with_context({'no_reset_password': True}).create({
'name': 'Cedric Public',
'login': 'cedric',
'alias_name': 'cedric',
'email': 'cedric.public@example.com',
'notify_email': 'none',
'groups_id': [(6, 0, [group_public_id])]
})
| agpl-3.0 |
mouadino/lymph | lymph/core/channels.py | 8 | 1843 | import gevent
import gevent.queue
from lymph.exceptions import Timeout, Nack, RemoteError
from lymph.core.messages import Message
class Channel(object):
def __init__(self, request, server):
self.request = request
self.server = server
class RequestChannel(Channel):
def __init__(self, request, server):
super(RequestChannel, self).__init__(request, server)
self.queue = gevent.queue.Queue()
def recv(self, msg):
self.queue.put(msg)
def get(self, timeout=1):
try:
msg = self.queue.get(timeout=timeout)
if msg.type == Message.NACK:
raise Nack(self.request)
elif msg.type == Message.ERROR:
raise RemoteError.from_reply(self.request, msg)
return msg
except gevent.queue.Empty:
raise Timeout(self.request)
finally:
self.close()
def close(self):
del self.server.channels[self.request.id]
class ReplyChannel(Channel):
def __init__(self, request, server):
super(ReplyChannel, self).__init__(request, server)
self._sent_reply = False
def reply(self, body):
self.server.send_reply(self.request, body)
self._sent_reply = True
def ack(self, unless_reply_sent=False):
if unless_reply_sent and self._sent_reply:
return
self.server.send_reply(self.request, None, msg_type=Message.ACK)
self._sent_reply = True
def nack(self, unless_reply_sent=False):
if unless_reply_sent and self._sent_reply:
return
self.server.send_reply(self.request, None, msg_type=Message.NACK)
self._sent_reply = True
def error(self, **body):
self.server.send_reply(self.request, body, msg_type=Message.ERROR)
def close(self):
pass
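# Illustrative sketch (not part of the original module): the timeout behaviour
# RequestChannel.get() builds on, reduced to the underlying gevent queue
# primitive; the one-second wait mirrors that method's default.
def _example_queue_timeout():
    queue = gevent.queue.Queue()
    try:
        return queue.get(timeout=1)  # no producer ever puts: raises Empty
    except gevent.queue.Empty:
        return 'timed out'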
| apache-2.0 |
toshywoshy/ansible | lib/ansible/plugins/cliconf/aruba.py | 13 | 3247 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
cliconf: aruba
short_description: Use aruba cliconf to run command on Aruba platform
description:
- This aruba plugin provides low level abstraction apis for
sending and receiving CLI commands from Aruba network devices.
version_added: 2.4
"""
import re
import json
from itertools import chain
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.plugins.cliconf import CliconfBase, enable_mode
class Cliconf(CliconfBase):
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'aruba'
reply = self.get('show version')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Version (\S+)', data)
if match:
device_info['network_os_version'] = match.group(1)
match = re.search(r'^MODEL: (\S+)\),', data, re.M)
if match:
device_info['network_os_model'] = match.group(1)
reply = self.get('show hostname')
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'^Hostname is (.+)', data, re.M)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
@enable_mode
def get_config(self, source='running', format='text', flags=None):
if source not in ('running', 'startup'):
return self.invalid_params("fetching configuration from %s is not supported" % source)
if source == 'running':
cmd = 'show running-config all'
else:
cmd = 'show configuration'
return self.send_command(cmd)
@enable_mode
def edit_config(self, command):
for cmd in chain(['configure terminal'], to_list(command), ['end']):
self.send_command(cmd)
def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
def get_capabilities(self):
result = super(Cliconf, self).get_capabilities()
return json.dumps(result)
def set_cli_prompt_context(self):
"""
Make sure we are in the operational cli mode
:return: None
"""
if self._connection.connected:
self._update_cli_prompt_context(config_context=')#')
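# Illustrative check (not part of the original plugin): the get_device_info()
# regexes applied to a fabricated banner -- the sample text is an assumption,
# not captured Aruba output.
def _example_parse_banner():
    sample = 'MODEL: (Aruba7005), Version 6.4.4.8-4.2.4.2'
    version = re.search(r'Version (\S+)', sample)         # -> '6.4.4.8-4.2.4.2'
    model = re.search(r'^MODEL: (\S+)\),', sample, re.M)  # -> '(Aruba7005'
    return (version and version.group(1), model and model.group(1))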
| gpl-3.0 |
thomasboyt/zulip | bots/zephyr_mirror.py | 114 | 2868 | #!/usr/bin/env python
# Copyright (C) 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import subprocess
import os
import traceback
import signal
from zephyr_mirror_backend import parse_args
def die(signal, frame):
# We actually want to exit, so run os._exit (so as not to be caught and restarted)
os._exit(1)
signal.signal(signal.SIGINT, die)
(options, args) = parse_args()
sys.path[:0] = [os.path.join(options.root_path, 'api')]
from zulip import RandomExponentialBackoff
args = [os.path.join(options.root_path, "user_root", "zephyr_mirror_backend.py")]
args.extend(sys.argv[1:])
if options.sync_subscriptions:
subprocess.call(args)
sys.exit(0)
if options.forward_class_messages and not options.noshard:
sys.path.append("/home/zulip/zulip")
if options.on_startup_command is not None:
subprocess.call([options.on_startup_command])
from zerver.lib.parallel import run_parallel
print "Starting parallel zephyr class mirroring bot"
jobs = list("0123456789abcdef")
def run_job(shard):
subprocess.call(args + ["--shard=%s" % (shard,)])
return 0
for (status, job) in run_parallel(run_job, jobs, threads=16):
print "A mirroring shard died!"
pass
sys.exit(0)
backoff = RandomExponentialBackoff(timeout_success_equivalent=300)
while backoff.keep_going():
print "Starting zephyr mirroring bot"
try:
subprocess.call(args)
except:
traceback.print_exc()
backoff.fail()
print ""
print ""
print "ERROR: The Zephyr mirroring bot is unable to continue mirroring Zephyrs."
print "This is often caused by failing to maintain unexpired Kerberos tickets"
print "or AFS tokens. See https://zulip.com/zephyr for documentation on how to"
print "maintain unexpired Kerberos tickets and AFS tokens."
print ""
sys.exit(1)
| apache-2.0 |
baiwyc119/nw.js | tools/build_native_modules.py | 85 | 1916 | #!/usr/bin/env python
import os, re, sys
import subprocess
native_modules = ['nw_test_loop_without_handle',
'bignum',
'dtrace-provider',
'ref',
'lame',
];
script_dir = os.path.dirname(__file__)
native_root = os.path.join(script_dir, os.pardir, 'tests', 'node_modules')
native_root = os.path.normpath(native_root)
native_root = os.path.abspath(native_root)
nw_gyp_script = os.path.normpath(
os.path.join(script_dir,
os.pardir,
'tests',
'node_modules',
'nw-gyp',
'bin',
'nw-gyp.js'))
nw_gyp_script = os.path.abspath(nw_gyp_script)
cur_dir = os.getcwd()
# Get the node-webkit target version from README.md
nw_readme_md = os.path.join(os.path.dirname(__file__), '..', 'README.md')
f = open(nw_readme_md)
for line in f:
    if re.match(r'\[v\d+\.\d+\.\d+[\s\S]*\]', line):
        target = line.split()[0][2:]
        break
f.close()
import optparse
parser = optparse.OptionParser()
parser.add_option('-t','--target',
help='the node-webkit verison')
opts, args = parser.parse_args()
if opts.target:
target = opts.target
exec_args = ['nw-gyp',
'configure',
'--target=%s'%(target),
'build']
win = sys.platform in ('win32', 'cygwin')
#We need to rebuild a submodule in http-auth
apache_crypt_path = os.path.join(
script_dir,
"..",
"tests",
"node_modules",
"http-auth",
"node_modules",
"htpasswd",
"node_modules",
"apache-crypt")
os.chdir(apache_crypt_path)
subprocess.call(exec_args)
for dir in native_modules:
if dir == 'bignum' and win:
continue
native_dir = os.path.join(native_root, dir)
os.chdir(native_dir)
#exec_args[1] = os.path.relpath(nw_gyp_script, os.getcwd())
subprocess.call(exec_args)
#os.execl(node_gyp_script, '', 'build')
os.chdir(cur_dir)
| mit |
bright-sparks/titanium_mobile | node_modules/ioslib/node_modules/node-ios-device/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
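# Illustrative usage (not part of the backport): insertion order is preserved
# and popitem() is LIFO by default, matching the docstrings above.
def _example_ordering():
    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    assert list(od.keys()) == ['a', 'b', 'c']
    od['b'] = 20  # re-assigning an existing key keeps its position
    assert list(od.items()) == [('a', 1), ('b', 20), ('c', 3)]
    assert od.popitem() == ('c', 3)            # LIFO
    assert od.popitem(last=False) == ('a', 1)  # FIFO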
| apache-2.0 |
hexpl0it/plugin.video.genesi-ita | resources/lib/libraries/f4mproxy/utils/python_rc4.py | 8 | 1114 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Pure-Python RC4 implementation."""
from .rc4 import RC4
from .cryptomath import *
def new(key):
return Python_RC4(key)
class Python_RC4(RC4):
def __init__(self, keyBytes):
RC4.__init__(self, keyBytes, "python")
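# Key-scheduling algorithm (KSA): mix the key into S to produce a key-dependent permutation of 0..255.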
S = [i for i in range(256)]
j = 0
for i in range(256):
j = (j + S[i] + keyBytes[i % len(keyBytes)]) % 256
S[i], S[j] = S[j], S[i]
self.S = S
self.i = 0
self.j = 0
def encrypt(self, plaintextBytes):
ciphertextBytes = plaintextBytes[:]
S = self.S
i = self.i
j = self.j
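# Pseudo-random generation algorithm (PRGA): step through S, swapping entries, and XOR each keystream byte S[t] into the buffer.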
for x in range(len(ciphertextBytes)):
i = (i + 1) % 256
j = (j + S[i]) % 256
S[i], S[j] = S[j], S[i]
t = (S[i] + S[j]) % 256
ciphertextBytes[x] ^= S[t]
self.i = i
self.j = j
return ciphertextBytes
def decrypt(self, ciphertext):
return self.encrypt(ciphertext)
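# Illustrative usage (not part of the original module): RC4 is symmetric, so
# decrypt() simply re-applies encrypt(); a fresh instance is needed on each
# side because every call advances the internal (i, j, S) state.
def _example_roundtrip():
    key = bytearray(b'secret key')
    plaintext = bytearray(b'attack at dawn')
    ciphertext = new(key).encrypt(plaintext)
    assert new(key).decrypt(ciphertext) == plaintext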
| gpl-3.0 |
retomerz/intellij-community | python/helpers/pydev/_pydev_bundle/pydev_localhost.py | 22 | 1215 | from _pydevd_bundle import pydevd_constants
from _pydev_imps._pydev_saved_modules import socket
_cache = None
def get_localhost():
'''
Should return 127.0.0.1 in ipv4 and ::1 in ipv6
localhost is not used because on Windows Vista/Windows 7 name resolution can be unreliable
and slow (this was an issue on the pyunit server).
Using the IP directly solves the problem.
'''
#TODO: Needs better investigation!
global _cache
if _cache is None:
try:
for addr_info in socket.getaddrinfo("localhost", 80, 0, 0, socket.SOL_TCP):
config = addr_info[4]
if config[0] == '127.0.0.1':
_cache = '127.0.0.1'
return _cache
except:
#Ok, some versions of Python don't have getaddrinfo or SOL_TCP... Just consider it 127.0.0.1 in this case.
_cache = '127.0.0.1'
else:
_cache = 'localhost'
return _cache
def get_socket_name():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', 0))
return sock.getsockname()
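# Illustrative usage (not part of the original module): get_localhost()
# normally resolves to '127.0.0.1', and get_socket_name() binds to port 0 so
# the OS hands back a free ephemeral port as (host, port).
def _example():
    host = get_localhost()           # e.g. '127.0.0.1'
    _, free_port = get_socket_name()
    return host, free_port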
| apache-2.0 |
mengxn/tensorflow | tensorflow/contrib/compiler/jit_test.py | 50 | 10987 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.compiler.jit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.compiler import jit
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import function
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
_REGISTERED_OPS = op_def_registry.get_registered_ops()
def enable_jit_nonstateful(node_def):
try:
return not _REGISTERED_OPS[node_def.op].is_stateful
except KeyError:
raise ValueError("Unregistered op being created: %s" % node_def)
class JITTest(test.TestCase):
def compute(self, use_jit, compute_fn):
random_seed.set_random_seed(1234)
with self.test_session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(use_jit):
r = compute_fn()
sess.run(variables.global_variables_initializer())
return (r, sess.run(r))
def testJITCreateOpsLambda(self):
"""Test several ways of customizing the compilation attribute."""
def create_ops():
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(
-0.1, 0.1, seed=2)):
inputs = random_ops.random_uniform((1,), seed=1)
return inputs
v_false_1_t, v_false_1 = self.compute(False, create_ops)
_, v_false_2 = self.compute(False, create_ops)
v_true_1_t, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
_, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
v_all_true_t, _ = self.compute(True, create_ops)
self.assertFalse(v_false_1_t.op.get_attr("_XlaCompile"))
v_true_1_t_sampler_op = v_true_1_t.graph.get_operation_by_name(
"root/random_uniform/RandomUniform")
v_all_true_t_sampler_op = v_all_true_t.graph.get_operation_by_name(
"root/random_uniform/RandomUniform")
self.assertFalse(v_true_1_t_sampler_op.get_attr("_XlaCompile"))
self.assertTrue(v_all_true_t_sampler_op.get_attr("_XlaCompile"))
self.assertTrue(v_true_1_t.op.get_attr("_XlaCompile"))
self.assertTrue(v_all_true_t.op.get_attr("_XlaCompile"))
# Additionally ensure that where no JIT compilation happens on the
# random_uniform op, the output values are identical to the case
# where no JIT compilation happens anywhere.
self.assertAllClose(v_false_1, v_false_2)
self.assertAllClose(v_true_1, v_true_2)
self.assertAllClose(v_false_1, v_true_1)
def testJITXlaScope(self):
with self.test_session(graph=ops.Graph()):
with jit.experimental_jit_scope(True):
# XlaScope 0
a1 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope 1
a2 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope still 1, depth 1
a3 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope still 1, depth 2
a4 = constant_op.constant(1)
# XlaScope still 1, depth 1
a5 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope now 2, depth 0
a6 = constant_op.constant(1)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a3.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a4.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a5.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_2", a6.op.get_attr("_XlaScope"))
def testJITVariableSeed(self):
"""Test that the stateful initializer is not marked for compilation.
XLA does not currently support seeded initialization and XLA initializers
therefore return different values than non-XLA counterparts. Here
we ensure that if we can disable JIT compilation for the initializers and
get the same variable values as if no JIT compilation happened.
"""
def create_ops():
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(
-0.1, 0.1, seed=2)):
inputs = variable_scope.get_variable("var", (1,))
return inputs
_, v_false_1 = self.compute(False, create_ops)
_, v_false_2 = self.compute(False, create_ops)
_, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
_, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
self.assertAllClose(v_false_1, v_false_2)
self.assertAllClose(v_true_1, v_true_2)
self.assertAllClose(v_false_1, v_true_1)
class CompilationEnabledInGradientTest(test.TestCase):
def testCompilationInGradient(self):
with self.test_session():
x = constant_op.constant(3)
y_nc = math_ops.add(x, x, name="not_compiled")
with jit.experimental_jit_scope():
y_c = math_ops.add(y_nc, y_nc, name="compiled")
x_grads = gradients.gradients([y_c], [x])[0]
operations = x_grads.graph.get_operations()
c_grad_ops = [
op for op in operations if "gradients/compiled" in op.name]
nc_grad_ops = [
op for op in operations if "gradients/not_compiled" in op.name]
self.assertGreater(len(c_grad_ops), 0)
self.assertGreater(len(nc_grad_ops), 0)
for cg in c_grad_ops:
self.assertTrue(cg.get_attr("_XlaCompile"))
for ncg in nc_grad_ops:
with self.assertRaisesRegexp(ValueError, "No attr named"):
ncg.get_attr("_XlaCompile")
# d/dx (4 * x)
self.assertAllClose(4, x_grads.eval())
def testCompilationGradientScopeNames(self):
with self.test_session(graph=ops.Graph()):
with jit.experimental_jit_scope():
# XlaScope 0
a1 = constant_op.constant(1)
a1t = a1 + a1
with jit.experimental_jit_scope():
# XlaScope 1
a2 = constant_op.constant(1)
a2t = a2 + a2
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
grad_a1 = grad_a1.op.inputs[0]
grad_a2 = grad_a2.op.inputs[0]
self.assertTrue(grad_a1.op.get_attr("_XlaCompile"))
self.assertTrue(grad_a2.op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0", grad_a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", grad_a2.op.get_attr("_XlaScope"))
def testCompilationSeparateGradientScopeNames(self):
with self.test_session(graph=ops.Graph()):
with jit.experimental_jit_scope(True, separate_compiled_gradients=True):
# XlaScope 0
a1 = constant_op.constant(1)
a1t = a1 + a1
with jit.experimental_jit_scope(True, separate_compiled_gradients=True):
# XlaScope 1
a2 = constant_op.constant(1)
a2t = a2 + a2
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
grad_a1 = grad_a1.op.inputs[0]
grad_a2 = grad_a2.op.inputs[0]
self.assertTrue(grad_a1.op.get_attr("_XlaCompile"))
self.assertTrue(grad_a2.op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0_grad_GA",
grad_a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1_grad_GB",
grad_a2.op.get_attr("_XlaScope"))
def testPlaysNicelyWithDefun(self):
with self.test_session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(True): # This should be ignored
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
g_r = gradients.gradients(r, x, name="GA")[0]
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
self.assertEqual(b"function_mulop", func_attrs["_XlaScope"].s)
# Ensure the gradient (SymbolicGradient) is compiled, with the same
# _XlaScope as the function itself.
grad_op = g_r.op.inputs[0].op
self.assertTrue(grad_op.get_attr("_XlaCompile"))
self.assertEqual(b"function_mulop", grad_op.get_attr("_XlaScope"))
# Ensure the ops run: grad(x1*x1) = 2*x1
self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
def testPlaysNicelyWithDefunSeparateGradientScope(self):
with self.test_session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(True): # This should be ignored
@function.Defun(
compiled=True, noinline=True, separate_compiled_gradients=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
g_r = gradients.gradients(r, x, name="GA")[0]
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
self.assertEqual(b"function_mulop", func_attrs["_XlaScope"].s)
# Ensure the gradient (SymbolicGradient) is compiled, with a different
# _XlaScope from the function itself.
grad_op = g_r.op.inputs[0].op
self.assertTrue(grad_op.get_attr("_XlaCompile"))
self.assertEqual(b"function_mulop_grad_GA",
grad_op.get_attr("_XlaScope"))
# Ensure the ops run: grad(x1*x1) = 2*x1
self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
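# Illustrative sketch (not part of the test suite): the core pattern these
# tests exercise -- ops created under experimental_jit_scope() carry the
# _XlaCompile/_XlaScope attrs that the assertions above inspect.
def _example_scope():
    with ops.Graph().as_default():
        with jit.experimental_jit_scope():
            c = constant_op.constant(1.0) + constant_op.constant(2.0)
        return c.op.get_attr('_XlaCompile')  # True for the Add op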
if __name__ == "__main__":
test.main()
| apache-2.0 |
dfalt974/SickRage | lib/rtorrent/tracker.py | 173 | 5212 | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# from rtorrent.rpc import Method
import rtorrent.rpc
from rtorrent.common import safe_repr
Method = rtorrent.rpc.Method
class Tracker:
"""Represents an individual tracker within a L{Torrent} instance."""
def __init__(self, _rt_obj, info_hash, **kwargs):
self._rt_obj = _rt_obj
self.info_hash = info_hash # : info hash for the torrent using this tracker
for k in kwargs.keys():
setattr(self, k, kwargs.get(k, None))
# for clarity's sake...
self.index = self.group # : position of tracker within the torrent's tracker list
self.rpc_id = "{0}:t{1}".format(
self.info_hash, self.index) # : unique id to pass to rTorrent
def __repr__(self):
return safe_repr("Tracker(index={0}, url=\"{1}\")",
self.index, self.url)
def enable(self):
"""Alias for set_enabled("yes")"""
self.set_enabled("yes")
def disable(self):
"""Alias for set_enabled("no")"""
self.set_enabled("no")
def update(self):
"""Refresh tracker data
@note: All fields are stored as attributes to self.
@return: None
"""
multicall = rtorrent.rpc.Multicall(self)
retriever_methods = [m for m in methods
if m.is_retriever() and m.is_available(self._rt_obj)]
for method in retriever_methods:
multicall.add(method, self.rpc_id)
multicall.call()
methods = [
# RETRIEVERS
Method(Tracker, 'is_enabled', 't.is_enabled', boolean=True),
Method(Tracker, 'get_id', 't.get_id'),
Method(Tracker, 'get_scrape_incomplete', 't.get_scrape_incomplete'),
Method(Tracker, 'is_open', 't.is_open', boolean=True),
Method(Tracker, 'get_min_interval', 't.get_min_interval'),
Method(Tracker, 'get_scrape_downloaded', 't.get_scrape_downloaded'),
Method(Tracker, 'get_group', 't.get_group'),
Method(Tracker, 'get_scrape_time_last', 't.get_scrape_time_last'),
Method(Tracker, 'get_type', 't.get_type'),
Method(Tracker, 'get_normal_interval', 't.get_normal_interval'),
Method(Tracker, 'get_url', 't.get_url'),
Method(Tracker, 'get_scrape_complete', 't.get_scrape_complete',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_activity_time_last', 't.activity_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_activity_time_next', 't.activity_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_failed_time_last', 't.failed_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_failed_time_next', 't.failed_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_success_time_last', 't.success_time_last',
min_version=(0, 8, 9),
),
Method(Tracker, 'get_success_time_next', 't.success_time_next',
min_version=(0, 8, 9),
),
Method(Tracker, 'can_scrape', 't.can_scrape',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'get_failed_counter', 't.failed_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'get_scrape_counter', 't.scrape_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'get_success_counter', 't.success_counter',
min_version=(0, 8, 9)
),
Method(Tracker, 'is_usable', 't.is_usable',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'is_busy', 't.is_busy',
min_version=(0, 9, 1),
boolean=True
),
Method(Tracker, 'is_extra_tracker', 't.is_extra_tracker',
min_version=(0, 9, 1),
boolean=True,
),
Method(Tracker, "get_latest_sum_peers", "t.latest_sum_peers",
min_version=(0, 9, 0)
),
Method(Tracker, "get_latest_new_peers", "t.latest_new_peers",
min_version=(0, 9, 0)
),
# MODIFIERS
Method(Tracker, 'set_enabled', 't.set_enabled'),
]
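# Illustrative usage (not part of the original module): Tracker objects are
# normally reached through a Torrent; `torrent` below is an assumed
# rtorrent Torrent instance exposing a get_trackers() helper.
def _example_keep_primary(torrent):
    for tracker in torrent.get_trackers():
        tracker.update()           # refresh fields via a single multicall
        if tracker.index > 0:      # disable everything but the first tracker
            tracker.disable()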
| gpl-3.0 |
dguest/delphes-realistic-tracking | python/DelphesAnalysis/CPconfig.py | 15 | 1436 | #configuration of the ControlPlot machinery
from collections import namedtuple
controlPlot = namedtuple("controlPlot", ["label","module","classname","kwargs"])
eventCollection = namedtuple("eventCollection",["label","collection"])
eventProducer = namedtuple("eventProducer", ["label","module","function","kwargs"])
eventWeight = namedtuple("eventWeight", ["label","module","classname","kwargs"])
class configuration:
# default I/O
defaultFilename = "controlPlots"
RDSname = "rds_delphes"
WSname = "workspace_ras"
# mode: plots or dataset
runningMode = "plots"
# event selection class
eventSelection = ""
# control plot classes
controlPlots = [ ]
# event content: lists of eventCollection, eventProducer, and eventWeight objects respectively.
eventCollections = [ ]
eventProducers = [ ]
eventWeights = [ ]
class eventDumpConfig:
# fine-tuning of the event content for display
productsToPrint = [ ] # list of product to display (use the producer label)
collectionsToHide = [ ] # collections used in the analysis but not printed (use the collection label)
# import the actual implementation of the configuration
import os
theConfig = os.getenv("DelphesAnalysisCfg")
if theConfig is not None:
configImplementation = __import__(os.path.splitext(theConfig)[0])
configuration = configImplementation.configuration
eventDumpConfig = configImplementation.eventDumpConfig
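# Illustrative sketch (not part of the original module): the shape of a
# configuration implementation that $DelphesAnalysisCfg could point to; every
# module/class name below is hypothetical.
class _ExampleConfiguration(configuration):
    defaultFilename = "myControlPlots"
    eventSelection = "DiLeptonSelection"
    controlPlots = [controlPlot("leptons", "LeptonPlots", "LeptonControlPlots", {})]
    eventCollections = [eventCollection("muons", "Muon")]
    eventProducers = [eventProducer("dilepton", "DiLeptonBuilder", "build", {})]
    eventWeights = [eventWeight("lumi", "LumiWeight", "LumiReWeighting", {})]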
| gpl-3.0 |
kevcooper/bitcoin | test/functional/blockchain.py | 2 | 5880 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
from test_framework.test_framework import (BitcoinTestFramework, BITCOIND_PROC_WAIT_TIMEOUT)
from test_framework.util import (
assert_equal,
assert_raises,
assert_raises_jsonrpc,
assert_is_hex_string,
assert_is_hash_string,
)
class BlockchainTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [['-stopatheight=207']]
def run_test(self):
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
assert self.nodes[0].verifychain(4, 0)
def _test_getchaintxstats(self):
chaintxstats = self.nodes[0].getchaintxstats(1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 17000)
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0)
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
assert_equal(res['total_amount'], res3['total_amount'])
assert_equal(res['transactions'], res3['transactions'])
assert_equal(res['height'], res3['height'])
assert_equal(res['txouts'], res3['txouts'])
assert_equal(res['bogosize'], res3['bogosize'])
assert_equal(res['bestblock'], res3['bestblock'])
assert_equal(res['hash_serialized_2'], res3['hash_serialized_2'])
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_jsonrpc(-5, "Block not found",
node.getblockheader, "nonsense")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generate(6)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generate(1)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].process.wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
self.nodes[0] = self.start_node(0, self.options.tmpdir)
assert_equal(self.nodes[0].getblockcount(), 207)
if __name__ == '__main__':
BlockchainTest().main()
| mit |
Peddle/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/form.py | 56 | 3270 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import FORMNS
from .element import Element
# Autogenerated
def Button(**args):
return Element(qname = (FORMNS,'button'), **args)
def Checkbox(**args):
return Element(qname = (FORMNS,'checkbox'), **args)
def Column(**args):
return Element(qname = (FORMNS,'column'), **args)
def Combobox(**args):
return Element(qname = (FORMNS,'combobox'), **args)
def ConnectionResource(**args):
return Element(qname = (FORMNS,'connection-resource'), **args)
def Date(**args):
return Element(qname = (FORMNS,'date'), **args)
def File(**args):
return Element(qname = (FORMNS,'file'), **args)
def FixedText(**args):
return Element(qname = (FORMNS,'fixed-text'), **args)
def Form(**args):
return Element(qname = (FORMNS,'form'), **args)
def FormattedText(**args):
return Element(qname = (FORMNS,'formatted-text'), **args)
def Frame(**args):
return Element(qname = (FORMNS,'frame'), **args)
def GenericControl(**args):
return Element(qname = (FORMNS,'generic-control'), **args)
def Grid(**args):
return Element(qname = (FORMNS,'grid'), **args)
def Hidden(**args):
return Element(qname = (FORMNS,'hidden'), **args)
def Image(**args):
return Element(qname = (FORMNS,'image'), **args)
def ImageFrame(**args):
return Element(qname = (FORMNS,'image-frame'), **args)
def Item(**args):
return Element(qname = (FORMNS,'item'), **args)
def ListProperty(**args):
return Element(qname = (FORMNS,'list-property'), **args)
def ListValue(**args):
return Element(qname = (FORMNS,'list-value'), **args)
def Listbox(**args):
return Element(qname = (FORMNS,'listbox'), **args)
def Number(**args):
return Element(qname = (FORMNS,'number'), **args)
def Option(**args):
return Element(qname = (FORMNS,'option'), **args)
def Password(**args):
return Element(qname = (FORMNS,'password'), **args)
def Properties(**args):
return Element(qname = (FORMNS,'properties'), **args)
def Property(**args):
return Element(qname = (FORMNS,'property'), **args)
def Radio(**args):
return Element(qname = (FORMNS,'radio'), **args)
def Text(**args):
return Element(qname = (FORMNS,'text'), **args)
def Textarea(**args):
return Element(qname = (FORMNS,'textarea'), **args)
def Time(**args):
return Element(qname = (FORMNS,'time'), **args)
def ValueRange(**args):
return Element(qname = (FORMNS,'value-range'), **args)
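# Illustrative usage (not part of the generated module): the factories are
# thin Element wrappers, so a form tree is built by nesting them. The
# attribute names below follow the ODF form namespace and are assumptions.
def _example_form():
    form = Form(name="order")
    form.addElement(Button(name="submit", label="Submit"))
    return form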
| apache-2.0 |
geektoni/Influenza-Like-Illness-Predictor | data_analysis/compare_pageviews_pagecounts.py | 1 | 3253 | import glob
import os
import argparse
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1.2)
def generate_dataframe(country, data="new_data"):
path = "../data/wikipedia_{}/{}".format(country, data)
files = [os.path.basename(f) for f in glob.glob(path + "/*.csv", recursive=True)]
years = [f.split(".")[0] for f in files]
years.sort()
df = pd.DataFrame()
for y in years:
if int(y) >= 2015 and data != "pageviews" and data != "old_data" and data != "cyclerank_pageviews":
continue
tmp = pd.read_csv(path + "/" + y + ".csv")
df = pd.concat([df, tmp], ignore_index=True)
# Fill nan values
df.fillna(0, inplace=True)
# Sum together all the pageviews
total_pageviews = df.sum(axis=1).to_frame()
total_pageviews.rename(columns={0: "pagecounts_unorm"}, inplace=True)
total_pageviews.reset_index(inplace=True)
total_pageviews["week"] = df["Week"]
# Remove rows with zero counts
indexes = total_pageviews[total_pageviews.pagecounts_unorm == 0].index
total_pageviews.drop(indexes, inplace=True)
total_pageviews.reset_index(inplace=True)
# Normalize the data
scaler = MinMaxScaler()
total_pageviews["pagecounts"] = scaler.fit_transform(total_pageviews["pagecounts_unorm"].values.reshape(-1,1))
return total_pageviews
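# Toy illustration of the MinMaxScaler step above: the raw weekly totals are
# rescaled linearly so the minimum maps to 0 and the maximum to 1, e.g.
#   MinMaxScaler().fit_transform(np.array([10, 20, 40]).reshape(-1, 1)).ravel()
#   -> array([0.0, 0.33333333, 1.0])
# which is what makes series from differently sized Wikipedias comparable.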
def get_label(country):
if country == "italy":
return "Italian"
elif country == "germany":
return "German"
else:
return "Dutch"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data", type=str, default="pageviews")
parser.add_argument("--legend", action="store_true", default=False)
args = parser.parse_args()
countries = ["italy", "germany", "netherlands"]
# Generate figure
fig = plt.figure(figsize=(8,4))
# Set the weeks
step = 20
weeks = []
max_weeks = 0
total_observations = 0
for c in countries:
df = generate_dataframe(c, args.data)
# Plot the data
sns.lineplot(data=df["pagecounts"], label=get_label(c), legend=False)
# Set the weeks we need to plot
if max_weeks < len(df["week"]):
weeks=[]
counter=0
for e in df["week"].to_list():
if counter%step == 0:
weeks.append(e)
counter += 1
max_weeks = len(df["week"])
# Set the max number of observations
total_observations = len(df["pagecounts"]) if len(df["pagecounts"]) > total_observations else total_observations
# Print the xticks
plt.xticks(np.arange(0, total_observations, step=step), weeks, rotation=90)
# Shrink current axis by 10% to make room for the legend
if args.legend:
ax = fig.axes[0]
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
#plt.show()
plt.savefig("pageviews-numerosity-{}.png".format(args.data), dpi=300, bbox_inches='tight')
| mit |
snbuback/django-guardian | guardian/tests/direct_rel_test.py | 41 | 8059 | from __future__ import unicode_literals
from .testapp.models import Mixed
from .testapp.models import Project
from .testapp.models import ProjectGroupObjectPermission
from .testapp.models import ProjectUserObjectPermission
from django.contrib.auth.models import Group, Permission
from django.test import TestCase
from guardian.compat import get_user_model
from guardian.shortcuts import assign_perm
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_objects_for_group
from guardian.shortcuts import get_objects_for_user
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import remove_perm
User = get_user_model()
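# For reference, the direct-relation models exercised below follow the
# pattern from guardian's docs (a sketch; the actual definitions live in
# testapp.models):
#
#     from guardian.models import UserObjectPermissionBase, GroupObjectPermissionBase
#
#     class ProjectUserObjectPermission(UserObjectPermissionBase):
#         content_object = models.ForeignKey(Project)
#
#     class ProjectGroupObjectPermission(GroupObjectPermissionBase):
#         content_object = models.ForeignKey(Project)
#
# The concrete ForeignKey lets guardian store object permissions in a
# dedicated table instead of the generic content-type based one.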
class TestDirectUserPermissions(TestCase):
def setUp(self):
self.joe = User.objects.create_user('joe', 'joe@example.com', 'foobar')
self.project = Project.objects.create(name='Foobar')
def get_perm(self, codename):
filters = {'content_type__app_label': 'testapp', 'codename': codename}
return Permission.objects.get(**filters)
def test_after_perm_is_created_without_shortcut(self):
perm = self.get_perm('add_project')
# We deliberately avoid the assign_perm shortcut here: if the generic
# user object permissions model were silently used instead of the direct
# one, assign_perm would still succeed and the test would not expose it.
ProjectUserObjectPermission.objects.create(
user=self.joe,
permission=perm,
content_object=self.project,
)
self.assertTrue(self.joe.has_perm('add_project', self.project))
def test_assign_perm(self):
assign_perm('add_project', self.joe, self.project)
filters = {
'content_object': self.project,
'permission__codename': 'add_project',
'user': self.joe,
}
result = ProjectUserObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 1)
def test_remove_perm(self):
assign_perm('add_project', self.joe, self.project)
filters = {
'content_object': self.project,
'permission__codename': 'add_project',
'user': self.joe,
}
result = ProjectUserObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 1)
remove_perm('add_project', self.joe, self.project)
result = ProjectUserObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 0)
def test_get_users_with_perms(self):
User.objects.create_user('john', 'john@foobar.com', 'john')
jane = User.objects.create_user('jane', 'jane@foobar.com', 'jane')
assign_perm('add_project', self.joe, self.project)
assign_perm('change_project', self.joe, self.project)
assign_perm('change_project', jane, self.project)
self.assertEqual(get_users_with_perms(self.project, attach_perms=True),
{
self.joe: ['add_project', 'change_project'],
jane: ['change_project'],
})
def test_get_users_with_perms_plus_groups(self):
User.objects.create_user('john', 'john@foobar.com', 'john')
jane = User.objects.create_user('jane', 'jane@foobar.com', 'jane')
group = Group.objects.create(name='devs')
self.joe.groups.add(group)
assign_perm('add_project', self.joe, self.project)
assign_perm('change_project', group, self.project)
assign_perm('change_project', jane, self.project)
self.assertEqual(get_users_with_perms(self.project, attach_perms=True),
{
self.joe: ['add_project', 'change_project'],
jane: ['change_project'],
})
def test_get_objects_for_user(self):
foo = Project.objects.create(name='foo')
bar = Project.objects.create(name='bar')
assign_perm('add_project', self.joe, foo)
assign_perm('add_project', self.joe, bar)
assign_perm('change_project', self.joe, bar)
result = get_objects_for_user(self.joe, 'testapp.add_project')
self.assertEqual(sorted(p.pk for p in result), sorted([foo.pk, bar.pk]))
class TestDirectGroupPermissions(TestCase):
def setUp(self):
self.joe = User.objects.create_user('joe', 'joe@example.com', 'foobar')
self.group = Group.objects.create(name='admins')
self.joe.groups.add(self.group)
self.project = Project.objects.create(name='Foobar')
def get_perm(self, codename):
filters = {'content_type__app_label': 'testapp', 'codename': codename}
return Permission.objects.get(**filters)
def test_after_perm_is_created_without_shortcut(self):
perm = self.get_perm('add_project')
# We deliberately avoid the assign_perm shortcut here: if the generic
# group object permissions model were silently used instead of the direct
# one, assign_perm would still succeed and the test would not expose it.
ProjectGroupObjectPermission.objects.create(
group=self.group,
permission=perm,
content_object=self.project,
)
self.assertTrue(self.joe.has_perm('add_project', self.project))
def test_assign_perm(self):
assign_perm('add_project', self.group, self.project)
filters = {
'content_object': self.project,
'permission__codename': 'add_project',
'group': self.group,
}
result = ProjectGroupObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 1)
def test_remove_perm(self):
assign_perm('add_project', self.group, self.project)
filters = {
'content_object': self.project,
'permission__codename': 'add_project',
'group': self.group,
}
result = ProjectGroupObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 1)
remove_perm('add_project', self.group, self.project)
result = ProjectGroupObjectPermission.objects.filter(**filters).count()
self.assertEqual(result, 0)
def test_get_groups_with_perms(self):
Group.objects.create(name='managers')
devs = Group.objects.create(name='devs')
assign_perm('add_project', self.group, self.project)
assign_perm('change_project', self.group, self.project)
assign_perm('change_project', devs, self.project)
self.assertEqual(get_groups_with_perms(self.project, attach_perms=True),
{
self.group: ['add_project', 'change_project'],
devs: ['change_project'],
})
def test_get_objects_for_group(self):
foo = Project.objects.create(name='foo')
bar = Project.objects.create(name='bar')
assign_perm('add_project', self.group, foo)
assign_perm('add_project', self.group, bar)
assign_perm('change_project', self.group, bar)
result = get_objects_for_group(self.group, 'testapp.add_project')
self.assertEqual(sorted(p.pk for p in result), sorted([foo.pk, bar.pk]))
class TestMixedDirectAndGenericObjectPermission(TestCase):
def setUp(self):
self.joe = User.objects.create_user('joe', 'joe@example.com', 'foobar')
self.group = Group.objects.create(name='admins')
self.joe.groups.add(self.group)
self.mixed = Mixed.objects.create(name='Foobar')
def test_get_users_with_perms_plus_groups(self):
User.objects.create_user('john', 'john@foobar.com', 'john')
jane = User.objects.create_user('jane', 'jane@foobar.com', 'jane')
group = Group.objects.create(name='devs')
self.joe.groups.add(group)
assign_perm('add_mixed', self.joe, self.mixed)
assign_perm('change_mixed', group, self.mixed)
assign_perm('change_mixed', jane, self.mixed)
self.assertEqual(get_users_with_perms(self.mixed, attach_perms=True),
{
self.joe: ['add_mixed', 'change_mixed'],
jane: ['change_mixed'],
})
| bsd-2-clause |
heke123/chromium-crosswalk | ppapi/generators/idl_parser.py | 47 | 39723 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Parser for PPAPI IDL """
#
# IDL Parser
#
# The parser uses the PLY yacc library to build a set of parsing rules based
# on WebIDL.
#
# WebIDL, and WebIDL regular expressions can be found at:
# http://dev.w3.org/2006/webapi/WebIDL/
# PLY can be found at:
# http://www.dabeaz.com/ply/
#
# The parser generates a tree by recursively matching sets of items against
# defined patterns. When a match is made, that set of items is reduced
# to a new item. The new item can provide a match for parent patterns.
# In this way an AST is built (reduced) depth first.
import getopt
import glob
import os.path
import re
import sys
import time
from idl_ast import IDLAst
from idl_log import ErrOut, InfoOut, WarnOut
from idl_lexer import IDLLexer
from idl_node import IDLAttribute, IDLFile, IDLNode
from idl_option import GetOption, Option, ParseOptions
from idl_lint import Lint
from ply import lex
from ply import yacc
Option('build_debug', 'Debug tree building.')
Option('parse_debug', 'Debug parse reduction steps.')
Option('token_debug', 'Debug token generation.')
Option('dump_tree', 'Dump the tree.')
Option('srcroot', 'Working directory.', default=os.path.join('..', 'api'))
Option('include_private', 'Include private IDL directory in default API paths.')
#
# ERROR_REMAP
#
# Maps the standard error formula into a more friendly error message.
#
ERROR_REMAP = {
'Unexpected ")" after "(".' : 'Empty argument list.',
'Unexpected ")" after ",".' : 'Missing argument.',
'Unexpected "}" after ",".' : 'Trailing comma in block.',
'Unexpected "}" after "{".' : 'Unexpected empty block.',
'Unexpected comment after "}".' : 'Unexpected trailing comment.',
'Unexpected "{" after keyword "enum".' : 'Enum missing name.',
'Unexpected "{" after keyword "struct".' : 'Struct missing name.',
'Unexpected "{" after keyword "interface".' : 'Interface missing name.',
}
# DumpReduction
#
# Prints out the set of items which matched a particular pattern and the
# new item or set it was reduced to.
def DumpReduction(cls, p):
if p[0] is None:
InfoOut.Log("OBJ: %s(%d) - None\n" % (cls, len(p)))
InfoOut.Log(" [%s]\n" % [str(x) for x in p[1:]])
else:
out = ""
for index in range(len(p) - 1):
out += " >%s< " % str(p[index + 1])
InfoOut.Log("OBJ: %s(%d) - %s : %s\n" % (cls, len(p), str(p[0]), out))
# CopyToList
#
# Takes an input item, list, or None, and returns a new list of that set.
def CopyToList(item):
# If the item is 'Empty' make it an empty list
if not item: item = []
# If the item is not a list
if type(item) is not type([]): item = [item]
# Make a copy we can modify
return list(item)
# ListFromConcat
#
# Generate a new list by joining inputs, each of which can be an
# individual item, a list of items, or None.
def ListFromConcat(*items):
itemsout = []
for item in items:
itemlist = CopyToList(item)
itemsout.extend(itemlist)
return itemsout
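# e.g. ListFromConcat('a', ['b', 'c'], None) -> ['a', 'b', 'c']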
# TokenTypeName
#
# Generate a string which has the type and value of the token.
def TokenTypeName(t):
if t.type == 'SYMBOL': return 'symbol %s' % t.value
if t.type in ['HEX', 'INT', 'OCT', 'FLOAT']:
return 'value %s' % t.value
if t.type == 'STRING' : return 'string "%s"' % t.value
if t.type == 'COMMENT' : return 'comment'
if t.type == t.value: return '"%s"' % t.value
return 'keyword "%s"' % t.value
#
# IDL Parser
#
# The Parser inherits from the Lexer to provide PLY with the tokenizing
# definitions. Parsing patterns are encoded as functions, where p_<name>
# is called any time a pattern matching the function documentation is found.
# Patterns are expressed in the form of:
# """ <new item> : <item> ....
# | <item> ...."""
#
# Where new item is the result of a match against one or more sets of items
# separated by the "|".
#
# The function is called with an object 'p' where p[0] is the output object
# and p[n] is the set of inputs for positive values of 'n'. Len(p) can be
# used to distinguish between multiple item sets in the pattern.
#
# For more details on parsing refer to the PLY documentation at
# http://www.dabeaz.com/ply/
#
#
# The parser uses the following conventions:
# a <type>_block defines a block of <type> definitions in the form of:
# [comment] [ext_attr_block] <type> <name> '{' <type>_list '}' ';'
# A block is reduced by returning an object of <type> with a name of <name>
# which in turn has <type>_list as children.
#
# A [comment] is an optional C style comment block enclosed in /* ... */ which
# is appended to the adjacent node as a child.
#
# A [ext_attr_block] is an optional list of Extended Attributes which is
# appended to the adjacent node as a child.
#
# a <type>_list defines a list of <type> items which will be passed as a
# list of children to the parent pattern. A list is in the form of:
# [comment] [ext_attr_block] <...DEF...> ';' <type>_list | (empty)
# or
# [comment] [ext_attr_block] <...DEF...> <type>_cont
#
# In the first form, the list is reduced recursively, where the right side
# <type>_list is first reduced then joined with the pattern currently being
# matched. The list is terminated when the (empty) pattern is matched.
#
# In the second form the list is reduced recursively, where the right side
# <type>_cont is first reduced then joined with the pattern currently being
# matched. The <type>_cont is in the form of:
# ',' <type>_list | (empty)
# The <type>_cont form is used to consume the ',' which only occurs when
# there is more than one object in the list. The <type>_cont also provides
# the terminating (empty) definition.
#
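# As a concrete illustration of the conventions above, a (hypothetical) input
# such as:
#
#   /* Copyright notice. */
#   /* File comment. */
#   enum Colors {
#     /* The primary one. */
#     RED = 1,
#     GREEN = 2
#   };
#
# reduces depth first: each enum_list entry becomes an EnumItem node (with the
# comment attached to RED as a child), the EnumItems become children of an
# Enum node named 'Colors', and the Enum node joins top_list.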
class IDLParser(IDLLexer):
# TOP
#
# This pattern defines the top of the parse tree. The parse tree is in the
# the form of:
#
# top
# *modifiers
# *comments
# *ext_attr_block
# ext_attr_list
# attr_arg_list
# *integer, value
# *param_list
# *typeref
#
# top_list
# describe_block
# describe_list
# enum_block
# enum_item
# interface_block
# member
# label_block
# label_item
# struct_block
# member
# typedef_decl
# typedef_data
# typedef_func
#
# (* sub matches found at multiple levels and are not truly children of top)
#
# We force all input files to start with two comments. The first comment is a
# Copyright notice followed by a set of file wide Extended Attributes, followed
# by the file comment and finally by file level patterns.
#
# Find the Copyright, File comment, and optional file wide attributes. We
# use a match with COMMENT instead of comments to force the token to be
# present. The extended attributes and the top_list become siblings which
# in turn are children of the file object created from the results of top.
def p_top(self, p):
"""top : COMMENT COMMENT ext_attr_block top_list"""
Copyright = self.BuildComment('Copyright', p, 1)
Filedoc = self.BuildComment('Comment', p, 2)
p[0] = ListFromConcat(Copyright, Filedoc, p[3], p[4])
if self.parse_debug: DumpReduction('top', p)
def p_top_short(self, p):
"""top : COMMENT ext_attr_block top_list"""
Copyright = self.BuildComment('Copyright', p, 1)
Filedoc = IDLNode('Comment', self.lexobj.filename, p.lineno(2)-1,
p.lexpos(2)-1, [self.BuildAttribute('NAME', ''),
self.BuildAttribute('FORM', 'cc')])
p[0] = ListFromConcat(Copyright, Filedoc, p[2], p[3])
if self.parse_debug: DumpReduction('top', p)
# Build a list of top level items.
def p_top_list(self, p):
"""top_list : callback_decl top_list
| describe_block top_list
| dictionary_block top_list
| enum_block top_list
| inline top_list
| interface_block top_list
| label_block top_list
| namespace top_list
| struct_block top_list
| typedef_decl top_list
| bad_decl top_list
| """
if len(p) > 2:
p[0] = ListFromConcat(p[1], p[2])
if self.parse_debug: DumpReduction('top_list', p)
# Recover from error and continue parsing at the next top match.
def p_top_error(self, p):
"""top_list : error top_list"""
p[0] = p[2]
# Recover from error and continue parsing at the next top match.
def p_bad_decl(self, p):
"""bad_decl : modifiers SYMBOL error '}' ';'"""
p[0] = []
#
# Modifier List
#
#
def p_modifiers(self, p):
"""modifiers : comments ext_attr_block"""
p[0] = ListFromConcat(p[1], p[2])
if self.parse_debug: DumpReduction('modifiers', p)
#
# Scoped name is a name with an optional scope.
#
# Used for types and namespace names. eg. foo_bar.hello_world, or
# foo_bar.hello_world.SomeType.
#
def p_scoped_name(self, p):
"""scoped_name : SYMBOL scoped_name_rest"""
p[0] = ''.join(p[1:])
if self.parse_debug: DumpReduction('scoped_name', p)
def p_scoped_name_rest(self, p):
"""scoped_name_rest : '.' scoped_name
|"""
p[0] = ''.join(p[1:])
if self.parse_debug: DumpReduction('scoped_name_rest', p)
#
# Type reference
#
#
def p_typeref(self, p):
"""typeref : scoped_name"""
p[0] = p[1]
if self.parse_debug: DumpReduction('typeref', p)
#
# Comments
#
# Comments are optional list of C style comment objects. Comments are returned
# as a list or None.
#
def p_comments(self, p):
"""comments : COMMENT comments
| """
if len(p) > 1:
child = self.BuildComment('Comment', p, 1)
p[0] = ListFromConcat(child, p[2])
if self.parse_debug: DumpReduction('comments', p)
else:
if self.parse_debug: DumpReduction('no comments', p)
#
# Namespace
#
# A namespace provides a named scope to an enclosed top_list.
#
def p_namespace(self, p):
"""namespace : modifiers NAMESPACE namespace_name '{' top_list '}' ';'"""
children = ListFromConcat(p[1], p[5])
p[0] = self.BuildNamed('Namespace', p, 3, children)
# We allow namespace names of the form foo.bar.baz.
def p_namespace_name(self, p):
"""namespace_name : scoped_name"""
p[0] = p[1]
#
# Dictionary
#
# A dictionary is a named list of optional and required members.
#
def p_dictionary_block(self, p):
"""dictionary_block : modifiers DICTIONARY SYMBOL '{' struct_list '}' ';'"""
p[0] = self.BuildNamed('Dictionary', p, 3, ListFromConcat(p[1], p[5]))
def p_dictionary_errorA(self, p):
"""dictionary_block : modifiers DICTIONARY error ';'"""
p[0] = []
def p_dictionary_errorB(self, p):
"""dictionary_block : modifiers DICTIONARY error '{' struct_list '}' ';'"""
p[0] = []
#
# Callback
#
# A callback is essentially a single function declaration (outside of an
# Interface).
#
def p_callback_decl(self, p):
"""callback_decl : modifiers CALLBACK SYMBOL '=' SYMBOL param_list ';'"""
children = ListFromConcat(p[1], p[6])
p[0] = self.BuildNamed('Callback', p, 3, children)
#
# Inline
#
# Inline blocks define optional code to be emitted based on a language tag,
# in the form of:
# #inline <LANGUAGE>
# <CODE>
# #endinl
#
def p_inline(self, p):
"""inline : modifiers INLINE"""
words = p[2].split()
name = self.BuildAttribute('NAME', words[1])
lines = p[2].split('\n')
value = self.BuildAttribute('VALUE', '\n'.join(lines[1:-1]) + '\n')
children = ListFromConcat(name, value, p[1])
p[0] = self.BuildProduction('Inline', p, 2, children)
if self.parse_debug: DumpReduction('inline', p)
# Extended Attributes
#
# Extended Attributes denote properties which will be applied to a node in the
# AST. A list of extended attributes are denoted by a brackets '[' ... ']'
# enclosing a comma separated list of extended attributes in the form of:
#
# Name
# Name=HEX | INT | OCT | FLOAT
# Name="STRING"
# Name=Function(arg ...)
# TODO(bradnelson) -Not currently supported:
# ** Name(arg ...) ...
# ** Name=Scope::Value
#
# Extended Attributes are returned as a list or None.
def p_ext_attr_block(self, p):
"""ext_attr_block : '[' ext_attr_list ']'
| """
if len(p) > 1:
p[0] = p[2]
if self.parse_debug: DumpReduction('ext_attr_block', p)
else:
if self.parse_debug: DumpReduction('no ext_attr_block', p)
def p_ext_attr_list(self, p):
"""ext_attr_list : SYMBOL '=' SYMBOL ext_attr_cont
| SYMBOL '=' value ext_attr_cont
| SYMBOL '=' SYMBOL param_list ext_attr_cont
| SYMBOL ext_attr_cont"""
# If there are 4 tokens plus a return slot, this must be in the form
# SYMBOL = SYMBOL|value ext_attr_cont
if len(p) == 5:
p[0] = ListFromConcat(self.BuildAttribute(p[1], p[3]), p[4])
# If there are 5 tokens plus a return slot, this must be in the form
# SYMBOL = SYMBOL (param_list) ext_attr_cont
elif len(p) == 6:
member = self.BuildNamed('Member', p, 3, [p[4]])
p[0] = ListFromConcat(self.BuildAttribute(p[1], member), p[5])
# Otherwise, this must be: SYMBOL ext_attr_cont
else:
p[0] = ListFromConcat(self.BuildAttribute(p[1], 'True'), p[2])
if self.parse_debug: DumpReduction('ext_attribute_list', p)
def p_ext_attr_list_values(self, p):
"""ext_attr_list : SYMBOL '=' '(' values ')' ext_attr_cont
| SYMBOL '=' '(' symbols ')' ext_attr_cont"""
p[0] = ListFromConcat(self.BuildAttribute(p[1], p[4]), p[6])
def p_values(self, p):
"""values : value values_cont"""
p[0] = ListFromConcat(p[1], p[2])
def p_symbols(self, p):
"""symbols : SYMBOL symbols_cont"""
p[0] = ListFromConcat(p[1], p[2])
def p_symbols_cont(self, p):
"""symbols_cont : ',' SYMBOL symbols_cont
| """
if len(p) > 1: p[0] = ListFromConcat(p[2], p[3])
def p_values_cont(self, p):
"""values_cont : ',' value values_cont
| """
if len(p) > 1: p[0] = ListFromConcat(p[2], p[3])
def p_ext_attr_cont(self, p):
"""ext_attr_cont : ',' ext_attr_list
|"""
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('ext_attribute_cont', p)
def p_ext_attr_func(self, p):
"""ext_attr_list : SYMBOL '(' attr_arg_list ')' ext_attr_cont"""
p[0] = ListFromConcat(self.BuildAttribute(p[1] + '()', p[3]), p[5])
if self.parse_debug: DumpReduction('attr_arg_func', p)
def p_ext_attr_arg_list(self, p):
"""attr_arg_list : SYMBOL attr_arg_cont
| value attr_arg_cont"""
p[0] = ListFromConcat(p[1], p[2])
def p_attr_arg_cont(self, p):
"""attr_arg_cont : ',' attr_arg_list
| """
if self.parse_debug: DumpReduction('attr_arg_cont', p)
if len(p) > 1: p[0] = p[2]
def p_attr_arg_error(self, p):
"""attr_arg_cont : error attr_arg_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('attr_arg_error', p)
#
# Describe
#
# A describe block is defined at the top level. It provides a mechanism for
# attributing a group of ext_attr to a describe_list. Members of the
# describe list are language specific 'Type' declarations
#
def p_describe_block(self, p):
"""describe_block : modifiers DESCRIBE '{' describe_list '}' ';'"""
children = ListFromConcat(p[1], p[4])
p[0] = self.BuildProduction('Describe', p, 2, children)
if self.parse_debug: DumpReduction('describe_block', p)
# Recover from describe error and continue parsing at the next top match.
def p_describe_error(self, p):
"""describe_list : error describe_list"""
p[0] = []
def p_describe_list(self, p):
"""describe_list : modifiers SYMBOL ';' describe_list
| modifiers ENUM ';' describe_list
| modifiers STRUCT ';' describe_list
| modifiers TYPEDEF ';' describe_list
| """
if len(p) > 1:
Type = self.BuildNamed('Type', p, 2, p[1])
p[0] = ListFromConcat(Type, p[4])
#
# Constant Values (integer, value)
#
# Constant values can be found at various levels. A constant value is returned
# as the string value after being validated against a FLOAT, HEX, INT, OCT or
# STRING pattern as appropriate.
#
def p_value(self, p):
"""value : FLOAT
| HEX
| INT
| OCT
| STRING"""
p[0] = p[1]
if self.parse_debug: DumpReduction('value', p)
def p_value_lshift(self, p):
"""value : integer LSHIFT INT"""
p[0] = "%s << %s" % (p[1], p[3])
if self.parse_debug: DumpReduction('value', p)
# Integers are numbers which may not be floats, used in cases like array sizes.
def p_integer(self, p):
"""integer : HEX
| INT
| OCT"""
p[0] = p[1]
if self.parse_debug: DumpReduction('integer', p)
#
# Expression
#
# A simple arithmetic expression.
#
precedence = (
('left','|','&','^'),
('left','LSHIFT','RSHIFT'),
('left','+','-'),
('left','*','/'),
('right','UMINUS','~'),
)
def p_expression_binop(self, p):
"""expression : expression LSHIFT expression
| expression RSHIFT expression
| expression '|' expression
| expression '&' expression
| expression '^' expression
| expression '+' expression
| expression '-' expression
| expression '*' expression
| expression '/' expression"""
p[0] = "%s %s %s" % (str(p[1]), str(p[2]), str(p[3]))
if self.parse_debug: DumpReduction('expression_binop', p)
def p_expression_unop(self, p):
"""expression : '-' expression %prec UMINUS
| '~' expression %prec '~'"""
p[0] = "%s%s" % (str(p[1]), str(p[2]))
if self.parse_debug: DumpReduction('expression_unop', p)
def p_expression_term(self, p):
"""expression : '(' expression ')'"""
p[0] = "%s%s%s" % (str(p[1]), str(p[2]), str(p[3]))
if self.parse_debug: DumpReduction('expression_term', p)
def p_expression_symbol(self, p):
"""expression : SYMBOL"""
p[0] = p[1]
if self.parse_debug: DumpReduction('expression_symbol', p)
def p_expression_integer(self, p):
"""expression : integer"""
p[0] = p[1]
if self.parse_debug: DumpReduction('expression_integer', p)
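# Note that the expression productions above rebuild the matched source text
# as a string (e.g. "1 << 2 | alpha") rather than evaluating it; evaluation,
# if any, is left to consumers of the AST.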
#
# Array List
#
# Defines a list of array sizes (if any).
#
def p_arrays(self, p):
"""arrays : '[' ']' arrays
| '[' integer ']' arrays
| """
# If there are 3 tokens plus a return slot it is an unsized array
if len(p) == 4:
array = self.BuildProduction('Array', p, 1)
p[0] = ListFromConcat(array, p[3])
# If there are 4 tokens plus a return slot it is a fixed array
elif len(p) == 5:
count = self.BuildAttribute('FIXED', p[2])
array = self.BuildProduction('Array', p, 2, [count])
p[0] = ListFromConcat(array, p[4])
# If there is only a return slot, do not fill it for this terminator.
elif len(p) == 1: return
if self.parse_debug: DumpReduction('arrays', p)
# An identifier is a legal value for a parameter or attribute name. Lots of
# existing IDL files use "callback" as a parameter/attribute name, so we allow
# a SYMBOL or the CALLBACK keyword.
def p_identifier(self, p):
"""identifier : SYMBOL
| CALLBACK"""
p[0] = p[1]
# Save the line number of the underlying token (otherwise it gets
# discarded), since we use it in the productions with an identifier in
# them.
p.set_lineno(0, p.lineno(1))
#
# Union
#
# A union allows multiple choices of types for a parameter or member.
#
def p_union_option(self, p):
"""union_option : modifiers SYMBOL arrays"""
typeref = self.BuildAttribute('TYPEREF', p[2])
children = ListFromConcat(p[1], typeref, p[3])
p[0] = self.BuildProduction('Option', p, 2, children)
def p_union_list(self, p):
"""union_list : union_option OR union_list
| union_option"""
if len(p) > 2:
p[0] = ListFromConcat(p[1], p[3])
else:
p[0] = p[1]
#
# Parameter List
#
# A parameter list is a collection of arguments which are passed to a
# function.
#
def p_param_list(self, p):
"""param_list : '(' param_item param_cont ')'
| '(' ')' """
if len(p) > 3:
args = ListFromConcat(p[2], p[3])
else:
args = []
p[0] = self.BuildProduction('Callspec', p, 1, args)
if self.parse_debug: DumpReduction('param_list', p)
def p_param_item(self, p):
"""param_item : modifiers optional typeref arrays identifier"""
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], p[2], typeref, p[4])
p[0] = self.BuildNamed('Param', p, 5, children)
if self.parse_debug: DumpReduction('param_item', p)
def p_param_item_union(self, p):
"""param_item : modifiers optional '(' union_list ')' identifier"""
union = self.BuildAttribute('Union', True)
children = ListFromConcat(p[1], p[2], p[4], union)
p[0] = self.BuildNamed('Param', p, 6, children)
if self.parse_debug: DumpReduction('param_item', p)
def p_optional(self, p):
"""optional : OPTIONAL
| """
if len(p) == 2:
p[0] = self.BuildAttribute('OPTIONAL', True)
def p_param_cont(self, p):
"""param_cont : ',' param_item param_cont
| """
if len(p) > 1:
p[0] = ListFromConcat(p[2], p[3])
if self.parse_debug: DumpReduction('param_cont', p)
def p_param_error(self, p):
"""param_cont : error param_cont"""
p[0] = p[2]
#
# Typedef
#
# A typedef creates a new referencable type. The typedef can specify an array
# definition as well as a function declaration.
#
def p_typedef_data(self, p):
"""typedef_decl : modifiers TYPEDEF SYMBOL SYMBOL ';' """
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], typeref)
p[0] = self.BuildNamed('Typedef', p, 4, children)
if self.parse_debug: DumpReduction('typedef_data', p)
def p_typedef_array(self, p):
"""typedef_decl : modifiers TYPEDEF SYMBOL arrays SYMBOL ';' """
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], typeref, p[4])
p[0] = self.BuildNamed('Typedef', p, 5, children)
if self.parse_debug: DumpReduction('typedef_array', p)
def p_typedef_func(self, p):
"""typedef_decl : modifiers TYPEDEF SYMBOL SYMBOL param_list ';' """
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], typeref, p[5])
p[0] = self.BuildNamed('Typedef', p, 4, children)
if self.parse_debug: DumpReduction('typedef_func', p)
#
# Enumeration
#
# An enumeration is a set of named integer constants. An enumeration
# is a valid type which can be referenced in other definitions.
#
def p_enum_block(self, p):
"""enum_block : modifiers ENUM SYMBOL '{' enum_list '}' ';'"""
p[0] = self.BuildNamed('Enum', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('enum_block', p)
# Recover from enum error and continue parsing at the next top match.
def p_enum_errorA(self, p):
"""enum_block : modifiers ENUM error '{' enum_list '}' ';'"""
p[0] = []
def p_enum_errorB(self, p):
"""enum_block : modifiers ENUM error ';'"""
p[0] = []
def p_enum_list(self, p):
"""enum_list : modifiers SYMBOL '=' expression enum_cont
| modifiers SYMBOL enum_cont"""
if len(p) > 4:
val = self.BuildAttribute('VALUE', p[4])
enum = self.BuildNamed('EnumItem', p, 2, ListFromConcat(val, p[1]))
p[0] = ListFromConcat(enum, p[5])
else:
enum = self.BuildNamed('EnumItem', p, 2, p[1])
p[0] = ListFromConcat(enum, p[3])
if self.parse_debug: DumpReduction('enum_list', p)
def p_enum_cont(self, p):
"""enum_cont : ',' enum_list
|"""
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('enum_cont', p)
def p_enum_cont_error(self, p):
"""enum_cont : error enum_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('enum_error', p)
#
# Label
#
# A label is a special kind of enumeration which maps a set of named labels
# to floating-point values (the grammar below requires SYMBOL '=' FLOAT).
#
def p_label_block(self, p):
"""label_block : modifiers LABEL SYMBOL '{' label_list '}' ';'"""
p[0] = self.BuildNamed('Label', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('label_block', p)
def p_label_list(self, p):
"""label_list : modifiers SYMBOL '=' FLOAT label_cont"""
val = self.BuildAttribute('VALUE', p[4])
label = self.BuildNamed('LabelItem', p, 2, ListFromConcat(val, p[1]))
p[0] = ListFromConcat(label, p[5])
if self.parse_debug: DumpReduction('label_list', p)
def p_label_cont(self, p):
"""label_cont : ',' label_list
|"""
if len(p) > 1: p[0] = p[2]
if self.parse_debug: DumpReduction('label_cont', p)
def p_label_cont_error(self, p):
"""label_cont : error label_cont"""
p[0] = p[2]
if self.parse_debug: DumpReduction('label_error', p)
#
# Members
#
# A member attribute or function of a struct or interface.
#
def p_member_attribute(self, p):
"""member_attribute : modifiers typeref arrays questionmark identifier"""
typeref = self.BuildAttribute('TYPEREF', p[2])
children = ListFromConcat(p[1], typeref, p[3], p[4])
p[0] = self.BuildNamed('Member', p, 5, children)
if self.parse_debug: DumpReduction('attribute', p)
def p_member_attribute_union(self, p):
"""member_attribute : modifiers '(' union_list ')' questionmark identifier"""
union = self.BuildAttribute('Union', True)
children = ListFromConcat(p[1], p[3], p[5], union)
p[0] = self.BuildNamed('Member', p, 6, children)
if self.parse_debug: DumpReduction('attribute', p)
def p_member_function(self, p):
"""member_function : modifiers static typeref arrays SYMBOL param_list"""
typeref = self.BuildAttribute('TYPEREF', p[3])
children = ListFromConcat(p[1], p[2], typeref, p[4], p[6])
p[0] = self.BuildNamed('Member', p, 5, children)
if self.parse_debug: DumpReduction('function', p)
def p_static(self, p):
"""static : STATIC
| """
if len(p) == 2:
p[0] = self.BuildAttribute('STATIC', True)
def p_questionmark(self, p):
"""questionmark : '?'
| """
if len(p) == 2:
p[0] = self.BuildAttribute('OPTIONAL', True)
#
# Interface
#
# An interface is a named collection of functions.
#
def p_interface_block(self, p):
"""interface_block : modifiers INTERFACE SYMBOL '{' interface_list '}' ';'"""
p[0] = self.BuildNamed('Interface', p, 3, ListFromConcat(p[1], p[5]))
if self.parse_debug: DumpReduction('interface_block', p)
def p_interface_error(self, p):
"""interface_block : modifiers INTERFACE error '{' interface_list '}' ';'"""
p[0] = []
def p_interface_list(self, p):
"""interface_list : member_function ';' interface_list
| """
if len(p) > 1 :
p[0] = ListFromConcat(p[1], p[3])
if self.parse_debug: DumpReduction('interface_list', p)
#
# Struct
#
# A struct is a named collection of members which in turn reference other
# types. The struct is a referencable type.
#
def p_struct_block(self, p):
"""struct_block : modifiers STRUCT SYMBOL '{' struct_list '}' ';'"""
children = ListFromConcat(p[1], p[5])
p[0] = self.BuildNamed('Struct', p, 3, children)
if self.parse_debug: DumpReduction('struct_block', p)
# Recover from struct error and continue parsing at the next top match.
def p_struct_error(self, p):
"""enum_block : modifiers STRUCT error '{' struct_list '}' ';'"""
p[0] = []
def p_struct_list(self, p):
"""struct_list : member_attribute ';' struct_list
| member_function ';' struct_list
|"""
if len(p) > 1: p[0] = ListFromConcat(p[1], p[3])
#
# Parser Errors
#
# p_error is called whenever the parser cannot find a pattern match for
# a set of items from the current state. The p_error function defined here
# is triggered, logging an error, and parsing recovery happens as the
# p_<type>_error functions defined above are called. This allows the parser
# to continue so as to capture more than one error per file.
#
def p_error(self, t):
filename = self.lexobj.filename
self.parse_errors += 1
if t:
lineno = t.lineno
pos = t.lexpos
prev = self.yaccobj.symstack[-1]
if type(prev) == lex.LexToken:
msg = "Unexpected %s after %s." % (
TokenTypeName(t), TokenTypeName(prev))
else:
msg = "Unexpected %s." % (t.value)
else:
lineno = self.last.lineno
pos = self.last.lexpos
msg = "Unexpected end of file after %s." % TokenTypeName(self.last)
self.yaccobj.restart()
# Attempt to remap the error to a friendlier form
if msg in ERROR_REMAP:
msg = ERROR_REMAP[msg]
# Log the error
ErrOut.LogLine(filename, lineno, pos, msg)
def Warn(self, node, msg):
WarnOut.LogLine(node.filename, node.lineno, node.pos, msg)
self.parse_warnings += 1
def __init__(self):
IDLLexer.__init__(self)
self.yaccobj = yacc.yacc(module=self, tabmodule=None, debug=False,
optimize=0, write_tables=0)
self.build_debug = GetOption('build_debug')
self.parse_debug = GetOption('parse_debug')
self.token_debug = GetOption('token_debug')
self.verbose = GetOption('verbose')
self.parse_errors = 0
#
# Tokenizer
#
# The token function returns the next token provided by IDLLexer for matching
# against the leaf patterns.
#
def token(self):
tok = self.lexobj.token()
if tok:
self.last = tok
if self.token_debug:
InfoOut.Log("TOKEN %s(%s)" % (tok.type, tok.value))
return tok
#
# BuildProduction
#
# Production is the set of items sent to a grammar rule resulting in a new
# item being returned.
#
# p - Is the Yacc production object containing the stack of items
# index - Index into the production of the name for the item being produced.
# cls - The type of item being produced
# childlist - The children of the new item
def BuildProduction(self, cls, p, index, childlist=None):
if not childlist: childlist = []
filename = self.lexobj.filename
lineno = p.lineno(index)
pos = p.lexpos(index)
out = IDLNode(cls, filename, lineno, pos, childlist)
if self.build_debug:
InfoOut.Log("Building %s" % out)
return out
def BuildNamed(self, cls, p, index, childlist=None):
if not childlist: childlist = []
childlist.append(self.BuildAttribute('NAME', p[index]))
return self.BuildProduction(cls, p, index, childlist)
def BuildComment(self, cls, p, index):
name = p[index]
# Remove comment markers
lines = []
if name[:2] == '//':
# For C++ style, remove any leading whitespace and the '//' marker from
# each line.
form = 'cc'
for line in name.split('\n'):
start = line.find('//')
lines.append(line[start+2:])
else:
# For C style, remove the ending '*/'
form = 'c'
for line in name[:-2].split('\n'):
# Remove characters until start marker for this line '*' if found
# otherwise it should be blank.
offs = line.find('*')
if offs >= 0:
line = line[offs + 1:].rstrip()
else:
line = ''
lines.append(line)
name = '\n'.join(lines)
childlist = [self.BuildAttribute('NAME', name),
self.BuildAttribute('FORM', form)]
return self.BuildProduction(cls, p, index, childlist)
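# Worked example: given the token "// a\n// b", BuildComment produces a
# Comment node with FORM='cc' and NAME=' a\n b' -- the '//' markers are
# stripped but the character column after them is preserved.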
#
# BuildAttribute
#
# An ExtendedAttribute is a special production that results in a property
# which is applied to the adjacent item. Attributes have no children and
# instead represent key/value pairs.
#
def BuildAttribute(self, key, val):
return IDLAttribute(key, val)
#
# ParseData
#
# Attempts to parse the current data loaded in the lexer.
#
def ParseData(self, data, filename='<Internal>'):
self.SetData(filename, data)
try:
self.parse_errors = 0
self.parse_warnings = 0
return self.yaccobj.parse(lexer=self)
except lex.LexError as le:
ErrOut.Log(str(le))
return []
#
# ParseFile
#
# Loads a new file into the lexer and attempts to parse it.
#
def ParseFile(self, filename):
date = time.ctime(os.path.getmtime(filename))
data = open(filename).read()
if self.verbose:
InfoOut.Log("Parsing %s" % filename)
try:
out = self.ParseData(data, filename)
# If we have a src root specified, remove it from the path
srcroot = GetOption('srcroot')
if srcroot and filename.find(srcroot) == 0:
filename = filename[len(srcroot) + 1:]
filenode = IDLFile(filename, out, self.parse_errors + self.lex_errors)
filenode.SetProperty('DATETIME', date)
return filenode
except Exception as e:
ErrOut.LogLine(filename, self.last.lineno, self.last.lexpos,
'Internal parsing error - %s.' % str(e))
raise
#
# Flatten Tree
#
# Flattens the tree of IDLNodes for use in testing.
#
def FlattenTree(node):
add_self = False
out = []
for child in node.GetChildren():
if child.IsA('Comment'):
add_self = True
else:
out.extend(FlattenTree(child))
if add_self:
out = [str(node)] + out
return out
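# The self-tests below rely on marker comments embedded in the .idl test
# files, e.g. (illustrative):
#   /* OK Enum(Colors) */          - the next node built must stringify to this
#   /* FAIL Enum missing name. */  - the next logged error must match this text
# TestErrors pairs those markers against the nodes actually built and the
# errors actually logged, and counts any mismatch.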
def TestErrors(filename, filenode):
nodelist = filenode.GetChildren()
lexer = IDLLexer()
data = open(filename).read()
lexer.SetData(filename, data)
pass_comments = []
fail_comments = []
while True:
tok = lexer.lexobj.token()
if tok is None: break
if tok.type == 'COMMENT':
args = tok.value[3:-3].split()
if args[0] == 'OK':
pass_comments.append((tok.lineno, ' '.join(args[1:])))
else:
if args[0] == 'FAIL':
fail_comments.append((tok.lineno, ' '.join(args[1:])))
obj_list = []
for node in nodelist:
obj_list.extend(FlattenTree(node))
errors = 0
#
# Check for expected successes
#
obj_cnt = len(obj_list)
pass_cnt = len(pass_comments)
if obj_cnt != pass_cnt:
InfoOut.Log("Mismatched pass (%d) vs. nodes built (%d)."
% (pass_cnt, obj_cnt))
InfoOut.Log("PASS: %s" % [x[1] for x in pass_comments])
InfoOut.Log("OBJS: %s" % obj_list)
errors += 1
if pass_cnt > obj_cnt: pass_cnt = obj_cnt
for i in range(pass_cnt):
line, comment = pass_comments[i]
if obj_list[i] != comment:
ErrOut.LogLine(filename, line, None, "OBJ %s : EXPECTED %s\n" %
(obj_list[i], comment))
errors += 1
#
# Check for expected errors
#
err_list = ErrOut.DrainLog()
err_cnt = len(err_list)
fail_cnt = len(fail_comments)
if err_cnt != fail_cnt:
InfoOut.Log("Mismatched fail (%d) vs. errors seen (%d)."
% (fail_cnt, err_cnt))
InfoOut.Log("FAIL: %s" % [x[1] for x in fail_comments])
InfoOut.Log("ERRS: %s" % err_list)
errors += 1
if fail_cnt > err_cnt: fail_cnt = err_cnt
for i in range(fail_cnt):
line, comment = fail_comments[i]
err = err_list[i].strip()
if err != comment:
ErrOut.Log("%s(%d) Error\n\tERROR : %s\n\tEXPECT: %s" % (
filename, line, err_list[i], comment))
errors += 1
return errors
def TestFile(parser, filename):
# Capture errors instead of reporting them so we can compare them
# with the expected errors.
ErrOut.SetConsole(False)
ErrOut.SetCapture(True)
filenode = parser.ParseFile(filename)
# Re-enable output
ErrOut.SetConsole(True)
ErrOut.SetCapture(False)
# Compare captured errors
return TestErrors(filename, filenode)
def TestErrorFiles(filter):
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_parser', '*.idl')
filenames = glob.glob(idldir)
parser = IDLParser()
total_errs = 0
for filename in filenames:
if filter and filename not in filter: continue
errs = TestFile(parser, filename)
if errs:
ErrOut.Log("%s test failed with %d error(s)." % (filename, errs))
total_errs += errs
if total_errs:
ErrOut.Log("Failed parsing test.")
else:
InfoOut.Log("Passed parsing test.")
return total_errs
def TestNamespaceFiles(filter):
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_namespace', '*.idl')
filenames = glob.glob(idldir)
testnames = []
for filename in filenames:
if filter and filename not in filter: continue
testnames.append(filename)
# If we have no files to test, then skip this test
if not testnames:
InfoOut.Log('No files to test for namespace.')
return 0
InfoOut.SetConsole(False)
ast = ParseFiles(testnames)
InfoOut.SetConsole(True)
errs = ast.GetProperty('ERRORS')
if errs:
ErrOut.Log("Failed namespace test.")
else:
InfoOut.Log("Passed namespace test.")
return errs
def FindVersionError(releases, node):
err_cnt = 0
if node.IsA('Interface', 'Struct'):
comment_list = []
comment = node.GetOneOf('Comment')
if comment and comment.GetName()[:4] == 'REL:':
comment_list = comment.GetName()[5:].strip().split(' ')
first_list = [node.first_release[rel] for rel in releases]
first_list = sorted(set(first_list))
if first_list != comment_list:
node.Error("Mismatch in releases: %s vs %s." % (
comment_list, first_list))
err_cnt += 1
for child in node.GetChildren():
err_cnt += FindVersionError(releases, child)
return err_cnt
def TestVersionFiles(filter):
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_version', '*.idl')
filenames = glob.glob(idldir)
testnames = []
for filename in filenames:
if filter and filename not in filter: continue
testnames.append(filename)
# If we have no files to test, then skip this test
if not testnames:
InfoOut.Log('No files to test for version.')
return 0
ast = ParseFiles(testnames)
errs = FindVersionError(ast.releases, ast)
errs += ast.errors
if errs:
ErrOut.Log("Failed version test.")
else:
InfoOut.Log("Passed version test.")
return errs
default_dirs = ['.', 'trusted', 'dev', 'private']
def ParseFiles(filenames):
parser = IDLParser()
filenodes = []
if not filenames:
filenames = []
srcroot = GetOption('srcroot')
dirs = list(default_dirs)  # copy so the module-level default list is not mutated
if GetOption('include_private') and 'private' not in dirs:
dirs += ['private']
for dirname in dirs:
srcdir = os.path.join(srcroot, dirname, '*.idl')
srcdir = os.path.normpath(srcdir)
filenames += sorted(glob.glob(srcdir))
if not filenames:
ErrOut.Log('No sources provided.')
for filename in filenames:
filenode = parser.ParseFile(filename)
filenodes.append(filenode)
ast = IDLAst(filenodes)
if GetOption('dump_tree'): ast.Dump(0)
Lint(ast)
return ast
def Main(args):
filenames = ParseOptions(args)
# If testing...
if GetOption('test'):
errs = TestErrorFiles(filenames)
errs += TestNamespaceFiles(filenames)
errs += TestVersionFiles(filenames)
if errs:
ErrOut.Log("Parser failed with %d errors." % errs)
return -1
return 0
# Otherwise, build the AST
ast = ParseFiles(filenames)
errs = ast.GetProperty('ERRORS')
if errs:
ErrOut.Log('Found %d error(s).' % errs)
InfoOut.Log("%d files processed." % len(filenames))
return errs
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
luxnovalabs/enjigo_door | web_interface/django/templatetags/static.py | 114 | 4022 | try:
from urllib.parse import urljoin
except ImportError: # Python 2
from urlparse import urljoin
from django import template
from django.template.base import Node
from django.utils.encoding import iri_to_uri
register = template.Library()
class PrefixNode(template.Node):
def __repr__(self):
return "<PrefixNode for %r>" % self.name
def __init__(self, varname=None, name=None):
if name is None:
raise template.TemplateSyntaxError(
"Prefix nodes must be given a name to return.")
self.varname = varname
self.name = name
@classmethod
def handle_token(cls, parser, token, name):
"""
Class method to parse prefix node and return a Node.
"""
tokens = token.contents.split()
if len(tokens) > 1 and tokens[1] != 'as':
raise template.TemplateSyntaxError(
"First argument in '%s' must be 'as'" % tokens[0])
if len(tokens) > 1:
varname = tokens[2]
else:
varname = None
return cls(varname, name)
@classmethod
def handle_simple(cls, name):
try:
from django.conf import settings
except ImportError:
prefix = ''
else:
prefix = iri_to_uri(getattr(settings, name, ''))
return prefix
def render(self, context):
prefix = self.handle_simple(self.name)
if self.varname is None:
return prefix
context[self.varname] = prefix
return ''
@register.tag
def get_static_prefix(parser, token):
"""
Populates a template variable with the static prefix,
``settings.STATIC_URL``.
Usage::
{% get_static_prefix [as varname] %}
Examples::
{% get_static_prefix %}
{% get_static_prefix as static_prefix %}
"""
return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
"""
Populates a template variable with the media prefix,
``settings.MEDIA_URL``.
Usage::
{% get_media_prefix [as varname] %}
Examples::
{% get_media_prefix %}
{% get_media_prefix as media_prefix %}
"""
return PrefixNode.handle_token(parser, token, "MEDIA_URL")
class StaticNode(Node):
def __init__(self, varname=None, path=None):
if path is None:
raise template.TemplateSyntaxError(
"Static template nodes must be given a path to return.")
self.path = path
self.varname = varname
def url(self, context):
path = self.path.resolve(context)
return self.handle_simple(path)
def render(self, context):
url = self.url(context)
if self.varname is None:
return url
context[self.varname] = url
return ''
@classmethod
def handle_simple(cls, path):
return urljoin(PrefixNode.handle_simple("STATIC_URL"), path)
@classmethod
def handle_token(cls, parser, token):
"""
Class method to parse prefix node and return a Node.
"""
bits = token.split_contents()
if len(bits) < 2:
raise template.TemplateSyntaxError(
"'%s' takes at least one argument (path to file)" % bits[0])
path = parser.compile_filter(bits[1])
if len(bits) >= 2 and bits[-2] == 'as':
varname = bits[3]
else:
varname = None
return cls(varname, path)
@register.tag('static')
def do_static(parser, token):
"""
Joins the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
"""
return StaticNode.handle_token(parser, token)
def static(path):
return StaticNode.handle_simple(path)
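# Sketch of programmatic use (assuming STATIC_URL = '/static/' in settings):
#   static('myapp/css/base.css')  ->  '/static/myapp/css/base.css'
# handle_simple() simply urljoins the configured prefix with the given path.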
| unlicense |
Sri20/Introduction-Programming-Python | Solutions/Module7TaxesChallengeSolution.py | 19 | 1637 | #Declare and initialize your variables
country = ""
province = ""
orderTotal = 0
totalWithTax = 0
#I am declaring variables to hold the tax values used in the calculations
#That way if a tax rate changes, I only have to change it in one place instead
#of searching through my code to see where I had a specific numeric value and updating it
GST = .05
HST = .13
PST = .06
#Ask the user what country they are from
country = input("What country are you from? " )
#if they are from Canada ask which province...don't forget they may enter Canada as CANADA, Canada, canada, CAnada
#so convert the string to lowercase before you do the comparison
if country.lower() == "canada" :
province = input("Which province are you from? ")
#ask for the order total
orderTotal = float(input("What is your order total? "))
#Now add the taxes
#first check if they are from canada
if country.lower() == "canada" :
#if they are from canada, we have to change the calculation based on the province they specified
if province.lower() == "alberta" :
orderTotal = orderTotal + orderTotal * GST
elif province.lower() == "ontario" or province.lower() == "new brunswick" or province.lower() == "nova scotia" :
orderTotal = orderTotal + orderTotal * HST
else :
orderTotal = orderTotal + orderTotal * PST + orderTotal * GST
#if they are not from Canada there is no tax, so the amount they entered is the total with tax
#and no modification to orderTotal is required
#Now display the total with taxes to the user, don't forget to format the number
print("Your total including taxes comes to $%.2f " % orderTotal)
| apache-2.0 |
mdclyburn/ardupilot | Tools/LogAnalyzer/DataflashLog.py | 83 | 27803 | #
# Code to abstract the parsing of APM Dataflash log files, currently only used by the LogAnalyzer
#
# Initial code by Andrew Chapman (amchapman@gmail.com), 16th Jan 2014
#
from __future__ import print_function
import collections
import os
import numpy
import bisect
import sys
import ctypes
class Format(object):
'''Data channel format as specified by the FMT lines in the log file'''
def __init__(self,msgType,msgLen,name,types,labels):
self.NAME = 'FMT'
self.msgType = msgType
self.msgLen = msgLen
self.name = name
self.types = types
self.labels = labels.split(',')
def __str__(self):
return "%8s %s" % (self.name, `self.labels`)
@staticmethod
def trycastToFormatType(value,valueType):
'''using format characters from libraries/DataFlash/DataFlash.h to cast strings to basic python int/float/string types
tries a cast and, on failure, returns the value unchanged; acceptable because the text logs do not always match the declared format, e.g. MODE is declared int but logged as text'''
try:
if valueType in "fcCeEL":
return float(value)
elif valueType in "bBhHiIM":
return int(value)
elif valueType in "nNZ":
return str(value)
except:
pass
return value
def to_class(self):
members = dict(
NAME = self.name,
labels = self.labels[:],
)
fieldtypes = [i for i in self.types]
fieldlabels = self.labels[:]
# field access
for (label, _type) in zip(fieldlabels, fieldtypes):
def createproperty(name, format):
# extra scope for variable sanity
# scaling via _NAME and def NAME(self): return self._NAME / SCALE
propertyname = name
attributename = '_' + name
p = property(lambda x:getattr(x, attributename),
lambda x, v:setattr(x,attributename, Format.trycastToFormatType(v,format)))
members[propertyname] = p
members[attributename] = None
createproperty(label, _type)
# repr shows all values but the header
members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,'_'+k)) for k in x.labels]))
def init(a, *x):
if len(x) != len(a.labels):
raise ValueError("Invalid Length")
#print(list(zip(a.labels, x)))
for (l,v) in zip(a.labels, x):
try:
setattr(a, l, v)
except Exception as e:
print("{} {} {} failed".format(a,l,v))
print(e)
members['__init__'] = init
# finally, create the class
cls = type(\
'Log__{:s}'.format(self.name),
(object,),
members
)
#print(members)
return cls
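# Sketch of what to_class() yields (illustrative values): for a text-log
# format line such as "FMT, 129, 23, GPS, BIH, Status,TimeMS,Week", the
# generated class casts each field through trycastToFormatType on assignment:
#     GPS = Format(129, 23, 'GPS', 'BIH', 'Status,TimeMS,Week').to_class()
#     rec = GPS('3', '194000', '1764')
#     rec.Status    # -> 3 (int, via format char 'B')
#     rec.TimeMS    # -> 194000 (int, via 'I')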
class logheader(ctypes.LittleEndianStructure):
_fields_ = [ \
('head1', ctypes.c_uint8),
('head2', ctypes.c_uint8),
('msgid', ctypes.c_uint8),
]
def __repr__(self):
return "<logheader head1=0x{self.head1:x} head2=0x{self.head2:x} msgid=0x{self.msgid:x} ({self.msgid})>".format(self=self)
class BinaryFormat(ctypes.LittleEndianStructure):
NAME = 'FMT'
MSG = 128
SIZE = 0
FIELD_FORMAT = {
'b': ctypes.c_int8,
'B': ctypes.c_uint8,
'h': ctypes.c_int16,
'H': ctypes.c_uint16,
'i': ctypes.c_int32,
'I': ctypes.c_uint32,
'f': ctypes.c_float,
'd': ctypes.c_double,
'n': ctypes.c_char * 4,
'N': ctypes.c_char * 16,
'Z': ctypes.c_char * 64,
'c': ctypes.c_int16,# * 100,
'C': ctypes.c_uint16,# * 100,
'e': ctypes.c_int32,# * 100,
'E': ctypes.c_uint32,# * 100,
'L': ctypes.c_int32,
'M': ctypes.c_uint8,
'q': ctypes.c_int64,
'Q': ctypes.c_uint64,
}
FIELD_SCALE = {
'c': 100,
'C': 100,
'e': 100,
'E': 100,
}
_packed_ = True
_fields_ = [ \
('head', logheader),
('type', ctypes.c_uint8),
('length', ctypes.c_uint8),
('name', ctypes.c_char * 4),
('types', ctypes.c_char * 16),
('labels', ctypes.c_char * 64),
]
def __repr__(self):
return "<{cls} {data}>".format(cls=self.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(self,k)) for (k,_) in self._fields_[1:]]))
def to_class(self):
members = dict(
NAME = self.name,
MSG = self.type,
SIZE = self.length,
labels = self.labels.split(",") if self.labels else [],
_pack_ = True)
fieldtypes = [i for i in self.types]
fieldlabels = self.labels.split(",")
if self.labels and (len(fieldtypes) != len(fieldlabels)):
print("Broken FMT message for {} .. ignoring".format(self.name), file=sys.stderr)
return None
fields = [('head',logheader)]
# field access
for (label, _type) in zip(fieldlabels, fieldtypes):
def createproperty(name, format):
# extra scope for variable sanity
# scaling via _NAME and def NAME(self): return self._NAME / SCALE
propertyname = name
attributename = '_' + name
scale = BinaryFormat.FIELD_SCALE.get(format, None)
p = property(lambda x:getattr(x, attributename))
if scale is not None:
p = property(lambda x:getattr(x, attributename) / scale)
members[propertyname] = p
try:
fields.append((attributename, BinaryFormat.FIELD_FORMAT[format]))
except KeyError:
print('ERROR: Failed to add FMT type: {}, with format: {}'.format(attributename, format))
raise
createproperty(label, _type)
members['_fields_'] = fields
# repr shows all values but the header
members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,k)) for k in x.labels]))
# finally, create the class
cls = type(\
'Log__{:s}'.format(self.name),
(ctypes.LittleEndianStructure,),
members
)
if ctypes.sizeof(cls) != cls.SIZE:
print("size mismatch for {} expected {} got {}".format(cls, ctypes.sizeof(cls), cls.SIZE), file=sys.stderr)
# for i in cls.labels:
# print("{} = {}".format(i,getattr(cls,'_'+i)))
return None
return cls
BinaryFormat.SIZE = ctypes.sizeof(BinaryFormat)
class Channel(object):
'''storage for a single stream of data, i.e. all GPS.RelAlt values'''
# TODO: rethink data storage, but do more thorough regression testing before refactoring it
# TODO: store data as a scipy spline curve so we can more easily interpolate and sample the slope?
def __init__(self):
self.dictData = {} # dict of linenum->value # store dupe data in dict and list for now, until we decide which is the better way to go
self.listData = [] # list of (linenum,value) # store dupe data in dict and list for now, until we decide which is the better way to go
def getSegment(self, startLine, endLine):
'''returns a segment of this data (from startLine to endLine, inclusive) as a new Channel instance'''
segment = Channel()
segment.dictData = {k:v for k,v in self.dictData.iteritems() if k >= startLine and k <= endLine}
return segment
def min(self):
return min(self.dictData.values())
def max(self):
return max(self.dictData.values())
def avg(self):
return numpy.mean(self.dictData.values())
def getNearestValueFwd(self, lineNumber):
'''Returns (value,lineNumber)'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999))
while index<len(self.listData):
line = self.listData[index][0]
#print "Looking forwards for nearest value to line number %d, starting at line %d" % (lineNumber,line) # TEMP
if line >= lineNumber:
return (self.listData[index][1],line)
index += 1
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getNearestValueBack(self, lineNumber):
'''Returns (value,lineNumber)'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999)) - 1
while index>=0:
line = self.listData[index][0]
#print "Looking backwards for nearest value to line number %d, starting at line %d" % (lineNumber,line) # TEMP
if line <= lineNumber:
return (self.listData[index][1],line)
index -= 1
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getNearestValue(self, lineNumber, lookForwards=True):
'''find the nearest data value to the given lineNumber, defaults to first looking forwards. Returns (value,lineNumber)'''
if lookForwards:
try:
return self.getNearestValueFwd(lineNumber)
except:
return self.getNearestValueBack(lineNumber)
else:
try:
return self.getNearestValueBack(lineNumber)
except:
return self.getNearestValueFwd(lineNumber)
raise Exception("Error finding nearest value for line %d" % lineNumber)
def getInterpolatedValue(self, lineNumber):
(prevValue,prevValueLine) = self.getNearestValue(lineNumber, lookForwards=False)
(nextValue,nextValueLine) = self.getNearestValue(lineNumber, lookForwards=True)
if prevValueLine == nextValueLine:
return prevValue
weight = (lineNumber-prevValueLine) / float(nextValueLine-prevValueLine)
# linear interpolation: weight is the fractional distance from prev towards
# next, so prev gets (1-weight) and next gets weight
return (((1-weight)*prevValue) + (weight*nextValue))
def getIndexOf(self, lineNumber):
'''returns the index within this channel's listData of the given lineNumber, or raises an Exception if not found'''
index = bisect.bisect_left(self.listData, (lineNumber,-99999))
#print "INDEX of line %d: %d" % (lineNumber,index)
#print "self.listData[index][0]: %d" % self.listData[index][0]
if index < len(self.listData) and self.listData[index][0] == lineNumber:
return index
else:
raise Exception("Error finding index for line %d" % lineNumber)
class LogIterator:
'''Smart iterator that can move through a log by line number and maintain an index into the nearest values of all data channels'''
# TODO: LogIterator currently indexes the next available value rather than the nearest value, we should make it configurable between next/nearest
class LogIteratorSubValue:
'''syntactic sugar to allow access by LogIterator[lineLabel][dataLabel]'''
logdata = None
iterators = None
lineLabel = None
def __init__(self, logdata, iterators, lineLabel):
self.logdata = logdata
self.lineLabel = lineLabel
self.iterators = iterators
def __getitem__(self, dataLabel):
index = self.iterators[self.lineLabel][0]
return self.logdata.channels[self.lineLabel][dataLabel].listData[index][1]
iterators = {} # lineLabel -> (listIndex,lineNumber)
logdata = None
currentLine = None
def __init__(self, logdata, lineNumber=0):
self.logdata = logdata
self.currentLine = lineNumber
for lineLabel in self.logdata.formats:
if lineLabel in self.logdata.channels:
self.iterators[lineLabel] = ()
self.jump(lineNumber)
def __iter__(self):
return self
def __getitem__(self, lineLabel):
return LogIterator.LogIteratorSubValue(self.logdata, self.iterators, lineLabel)
def next(self):
'''increment iterator to next log line'''
self.currentLine += 1
if self.currentLine > self.logdata.lineCount:
return self
for lineLabel in self.iterators.keys():
# check if the currentLine has gone past the line we're pointing to for this type of data
dataLabel = self.logdata.formats[lineLabel].labels[0]
(index, lineNumber) = self.iterators[lineLabel]
# if so, and it is not the last entry in the log, then increment the indices for all dataLabels under that lineLabel
if (self.currentLine > lineNumber) and (index < len(self.logdata.channels[lineLabel][dataLabel].listData)-1):
index += 1
lineNumber = self.logdata.channels[lineLabel][dataLabel].listData[index][0]
self.iterators[lineLabel] = (index,lineNumber)
return self
def jump(self, lineNumber):
'''jump iterator to specified log line'''
self.currentLine = lineNumber
for lineLabel in self.iterators.keys():
dataLabel = self.logdata.formats[lineLabel].labels[0]
(value,lineNumber) = self.logdata.channels[lineLabel][dataLabel].getNearestValue(self.currentLine)
self.iterators[lineLabel] = (self.logdata.channels[lineLabel][dataLabel].getIndexOf(lineNumber), lineNumber)
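# Hedged sketch of driving LogIterator (assumes a parsed DataflashLog in
# `logdata`; "GPS" is an illustrative line label that may be absent from a
# given log).
def _example_log_iteration(logdata, lineLabel="GPS", steps=5):
    it = LogIterator(logdata)
    dataLabel = logdata.formats[lineLabel].labels[0]
    samples = []
    for _ in range(steps):
        samples.append(it[lineLabel][dataLabel])
        it.next()
    return samples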
class DataflashLogHelper:
'''helper functions for dealing with log data, put here to keep DataflashLog class as a simple parser and data store'''
@staticmethod
def getTimeAtLine(logdata, lineNumber):
'''returns the nearest GPS timestamp in milliseconds after the given line number'''
if not "GPS" in logdata.channels:
raise Exception("no GPS log data found")
# older logs use 'Time', newer logs use 'TimeMS'
timeLabel = "TimeMS"
if "Time" in logdata.channels["GPS"]:
timeLabel = "Time"
while lineNumber <= logdata.lineCount:
if lineNumber in logdata.channels["GPS"][timeLabel].dictData:
return logdata.channels["GPS"][timeLabel].dictData[lineNumber]
lineNumber = lineNumber + 1
sys.stderr.write("didn't find GPS data for " + str(lineNumber) + " - using maxtime\n")
return logdata.channels["GPS"][timeLabel].max()
@staticmethod
def findLoiterChunks(logdata, minLengthSeconds=0, noRCInputs=True):
'''returns a list of (startLine,endLine) pairs defining sections of the log which are in loiter mode, ordered from longest to shortest in time. If noRCInputs == True it only returns chunks with no control inputs'''
# TODO: implement noRCInputs handling when identifying stable loiter chunks, for now we're ignoring it
def chunkSizeCompare(chunk1, chunk2):
chunk1Len = chunk1[1]-chunk1[0]
chunk2Len = chunk2[1]-chunk2[0]
if chunk1Len == chunk2Len:
return 0
elif chunk1Len > chunk2Len:
return -1
else:
return 1
od = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0]))
chunks = []
for i in range(len(od.keys())):
if od.values()[i][0] == "LOITER":
startLine = od.keys()[i]
endLine = None
if i == len(od.keys())-1:
endLine = logdata.lineCount
else:
endLine = od.keys()[i+1]-1
chunkTimeSeconds = (DataflashLogHelper.getTimeAtLine(logdata,endLine)-DataflashLogHelper.getTimeAtLine(logdata,startLine)+1) / 1000.0
if chunkTimeSeconds > minLengthSeconds:
chunks.append((startLine,endLine))
#print "LOITER chunk: %d to %d, %d lines" % (startLine,endLine,endLine-startLine+1)
#print " (time %d to %d, %d seconds)" % (DataflashLogHelper.getTimeAtLine(logdata,startLine), DataflashLogHelper.getTimeAtLine(logdata,endLine), chunkTimeSeconds)
chunks.sort(chunkSizeCompare)
return chunks
@staticmethod
def isLogEmpty(logdata):
'''returns a human-readable error string if the log is essentially empty, otherwise returns None'''
# naive check for now, see if the throttle output was ever above 20%
throttleThreshold = 20
if logdata.vehicleType == "ArduCopter":
throttleThreshold = 200 # copter uses 0-1000, plane+rover use 0-100
if "CTUN" in logdata.channels:
maxThrottle = logdata.channels["CTUN"]["ThrOut"].max()
if maxThrottle < throttleThreshold:
return "Throttle never above 20%"
return None
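# Hedged sketch building on the helper above: approximate elapsed GPS time in
# milliseconds across the whole log (assumes GPS data is present).
def _example_elapsed_gps_ms(logdata):
    start = DataflashLogHelper.getTimeAtLine(logdata, 1)
    end = DataflashLogHelper.getTimeAtLine(logdata, logdata.lineCount)
    return end - start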
class DataflashLog(object):
'''APM Dataflash log file reader and container class. Keep this simple, add more advanced or specific functions to DataflashLogHelper class'''
knownHardwareTypes = ["APM", "PX4", "MPNG"]
intTypes = "bBhHiIM"
floatTypes = "fcCeEL"
charTypes = "nNZ"
def __init__(self, logfile=None, format="auto", ignoreBadlines=False):
self.filename = None
self.vehicleType = "" # ArduCopter, ArduPlane, ArduRover, etc, verbatim as given by header
self.firmwareVersion = ""
self.firmwareHash = ""
self.freeRAM = 0
self.hardwareType = "" # APM 1, APM 2, PX4, MPNG, etc What is VRBrain? BeagleBone, etc? Needs more testing
self.formats = {} # name -> Format
self.parameters = {} # token -> value
self.messages = {} # lineNum -> message
self.modeChanges = {} # lineNum -> (mode,value)
self.channels = {} # lineLabel -> {dataLabel:Channel}
self.filesizeKB = 0
self.durationSecs = 0
self.lineCount = 0
self.skippedLines = 0
if logfile:
self.read(logfile, format, ignoreBadlines)
def getCopterType(self):
'''returns quad/hex/octo/tradheli if this is a copter log'''
if self.vehicleType != "ArduCopter":
return None
motLabels = []
if "MOT" in self.formats: # not listed in PX4 log header for some reason?
motLabels = self.formats["MOT"].labels
if "GGain" in motLabels:
return "tradheli"
elif len(motLabels) == 4:
return "quad"
elif len(motLabels) == 6:
return "hex"
elif len(motLabels) == 8:
return "octo"
else:
return ""
def read(self, logfile, format="auto", ignoreBadlines=False):
'''returns on successful log read (including bad lines if ignoreBadlines==True), will throw an Exception otherwise'''
# TODO: dataflash log parsing code is pretty hacky, should re-write more methodically
self.filename = logfile
if self.filename == '<stdin>':
f = sys.stdin
else:
f = open(self.filename, 'r')
if format == 'bin':
head = '\xa3\x95\x80\x80'
elif format == 'log':
head = ""
elif format == 'auto':
if self.filename == '<stdin>':
# assuming TXT format
# raise ValueError("Invalid log format for stdin: {}".format(format))
head = ""
else:
head = f.read(4)
f.seek(0)
else:
raise ValueError("Unknown log format for {}: {}".format(self.filename, format))
if head == '\xa3\x95\x80\x80':
numBytes, lineNumber = self.read_binary(f, ignoreBadlines)
else:
numBytes, lineNumber = self.read_text(f, ignoreBadlines)
# gather some general stats about the log
self.lineCount = lineNumber
self.filesizeKB = numBytes / 1024.0
# TODO: switch duration calculation to use TimeMS values rather than GPS timestamps
if "GPS" in self.channels:
# the GPS time label changed at some point, need to handle both
timeLabel = None
for i in 'TimeMS','TimeUS','Time':
if i in self.channels["GPS"]:
timeLabel = i
break
firstTimeGPS = self.channels["GPS"][timeLabel].listData[0][1]
lastTimeGPS = self.channels["GPS"][timeLabel].listData[-1][1]
if timeLabel == 'TimeUS':
firstTimeGPS /= 1000
lastTimeGPS /= 1000
self.durationSecs = (lastTimeGPS-firstTimeGPS) / 1000
# TODO: calculate logging rate based on timestamps
# ...
def process(self, lineNumber, e):
if e.NAME == 'FMT':
cls = e.to_class()
if cls is not None: # FMT messages can be broken ...
if hasattr(e, 'type') and e.type not in self._formats: # binary log specific
self._formats[e.type] = cls
if cls.NAME not in self.formats:
self.formats[cls.NAME] = cls
elif e.NAME == "PARM":
self.parameters[e.Name] = e.Value
elif e.NAME == "MSG":
if not self.vehicleType:
tokens = e.Message.split(' ')
vehicleTypes = ["ArduPlane", "ArduCopter", "ArduRover"]
self.vehicleType = tokens[0]
self.firmwareVersion = tokens[1]
if len(tokens) == 3:
self.firmwareHash = tokens[2][1:-1]
else:
self.messages[lineNumber] = e.Message
elif e.NAME == "MODE":
if self.vehicleType in ["ArduCopter"]:
try:
modes = {0:'STABILIZE',
1:'ACRO',
2:'ALT_HOLD',
3:'AUTO',
4:'GUIDED',
5:'LOITER',
6:'RTL',
7:'CIRCLE',
9:'LAND',
10:'OF_LOITER',
11:'DRIFT',
13:'SPORT',
14:'FLIP',
15:'AUTOTUNE',
16:'HYBRID',}
self.modeChanges[lineNumber] = (modes[int(e.Mode)], e.ThrCrs)
except:
self.modeChanges[lineNumber] = (e.Mode, e.ThrCrs)
elif self.vehicleType in ["ArduPlane", "APM:Plane", "ArduRover", "APM:Rover", "APM:Copter"]:
self.modeChanges[lineNumber] = (e.Mode, e.ModeNum)
else:
raise Exception("Unknown log type for MODE line {} {}".format(self.vehicleType, repr(e)))
# anything else must be the log data
else:
groupName = e.NAME
# first time seeing this type of log line, create the channel storage
if not groupName in self.channels:
self.channels[groupName] = {}
for label in e.labels:
self.channels[groupName][label] = Channel()
# store each token in its relevant channel
for label in e.labels:
value = getattr(e, label)
channel = self.channels[groupName][label]
channel.dictData[lineNumber] = value
channel.listData.append((lineNumber, value))
def read_text(self, f, ignoreBadlines):
self.formats = {'FMT':Format}
lineNumber = 0
numBytes = 0
knownHardwareTypes = ["APM", "PX4", "MPNG"]
for line in f:
lineNumber = lineNumber + 1
numBytes += len(line) + 1
try:
#print "Reading line: %d" % lineNumber
line = line.strip('\n\r')
tokens = line.split(', ')
# first handle the log header lines
if line == " Ready to drive." or line == " Ready to FLY.":
continue
if line == "----------------------------------------": # present in pre-3.0 logs
raise Exception("Log file seems to be in the older format (prior to self-describing logs), which isn't supported")
if len(tokens) == 1:
tokens2 = line.split(' ')
if line == "":
pass
elif len(tokens2) == 1 and tokens2[0].isdigit(): # log index
pass
elif len(tokens2) == 3 and tokens2[0] == "Free" and tokens2[1] == "RAM:":
self.freeRAM = int(tokens2[2])
elif tokens2[0] in knownHardwareTypes:
self.hardwareType = line # not sure if we can parse this more usefully, for now only need to report it back verbatim
elif (len(tokens2) == 2 or len(tokens2) == 3) and tokens2[1][0].lower() == "v": # e.g. ArduCopter V3.1 (5c6503e2)
self.vehicleType = tokens2[0]
self.firmwareVersion = tokens2[1]
if len(tokens2) == 3:
self.firmwareHash = tokens2[2][1:-1]
else:
errorMsg = "Error parsing line %d of log file: %s" % (lineNumber, self.filename)
if ignoreBadlines:
print(errorMsg + " (skipping line)", file=sys.stderr)
self.skippedLines += 1
else:
raise Exception("")
else:
if not tokens[0] in self.formats:
raise ValueError("Unknown Format {}".format(tokens[0]))
e = self.formats[tokens[0]](*tokens[1:])
self.process(lineNumber, e)
except Exception as e:
print("BAD LINE: " + line, file=sys.stderr)
if not ignoreBadlines:
raise Exception("Error parsing line %d of log file %s - %s" % (lineNumber,self.filename,e.args[0]))
return (numBytes,lineNumber)
def read_binary(self, f, ignoreBadlines):
lineNumber = 0
numBytes = 0
for e in self._read_binary(f, ignoreBadlines):
lineNumber += 1
if e is None:
continue
numBytes += e.SIZE
# print(e)
self.process(lineNumber, e)
return (numBytes,lineNumber)
def _read_binary(self, f, ignoreBadlines):
self._formats = {128:BinaryFormat}
data = bytearray(f.read())
offset = 0
while len(data) > offset + ctypes.sizeof(logheader):
h = logheader.from_buffer(data, offset)
if not (h.head1 == 0xa3 and h.head2 == 0x95):
if ignoreBadlines == False:
raise ValueError(h)
else:
if h.head1 == 0xff and h.head2 == 0xff and h.msgid == 0xff:
print("Assuming EOF due to dataflash block tail filled with \\xff... (offset={off})".format(off=offset), file=sys.stderr)
break
if h.msgid in self._formats:
typ = self._formats[h.msgid]
if len(data) <= offset + typ.SIZE:
break
try:
e = typ.from_buffer(data, offset)
except:
print("data:{} offset:{} size:{} sizeof:{} sum:{}".format(len(data),offset,typ.SIZE,ctypes.sizeof(typ),offset+typ.SIZE))
raise
offset += typ.SIZE
else:
raise ValueError(str(h) + " unknown type")
yield e
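# Hedged usage sketch for the reader above (file name and labels invented):
#
#     logdata = DataflashLog("flight.log")
#     print("%s %s, %.1f seconds" % (logdata.vehicleType,
#                                    logdata.firmwareVersion,
#                                    logdata.durationSecs))
#     maxAlt = logdata.channels["GPS"]["Alt"].max()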
| gpl-3.0 |
pombredanne/MOG | nova/network/security_group/neutron_driver.py | 6 | 21310 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.
import sys
from neutronclient.common import exceptions as n_exc
from neutronclient.neutron import v2_0 as neutronv20
from oslo.config import cfg
from webob import exc
from nova.compute import api as compute_api
from nova import exception
from nova.network import neutronv2
from nova.network.security_group import security_group_base
from nova.objects import security_group
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
wrap_check_security_groups_policy = compute_api.policy_decorator(
scope='compute:security_groups')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SecurityGroupAPI(security_group_base.SecurityGroupBase):
id_is_uuid = True
def create_security_group(self, context, name, description):
neutron = neutronv2.get_client(context)
body = self._make_neutron_security_group_dict(name, description)
try:
security_group = neutron.create_security_group(
body).get('security_group')
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
LOG.exception(_("Neutron Error creating security group %s"),
name)
if e.status_code == 401:
# TODO(arosen) Cannot raise generic response from neutron here
# as this error code could be related to bad input or over
# quota
raise exc.HTTPBadRequest()
elif e.status_code == 409:
self.raise_over_quota(e.message)
raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_format(security_group)
def update_security_group(self, context, security_group,
name, description):
neutron = neutronv2.get_client(context)
body = self._make_neutron_security_group_dict(name, description)
try:
security_group = neutron.update_security_group(
security_group['id'], body).get('security_group')
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
LOG.exception(_("Neutron Error updating security group %s"),
name)
if e.status_code == 401:
# TODO(arosen) Cannot raise generic response from neutron here
# as this error code could be related to bad input or over
# quota
raise exc.HTTPBadRequest()
raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_format(security_group)
def _convert_to_nova_security_group_format(self, security_group):
nova_group = {}
nova_group['id'] = security_group['id']
nova_group['description'] = security_group['description']
nova_group['name'] = security_group['name']
nova_group['project_id'] = security_group['tenant_id']
nova_group['rules'] = []
for rule in security_group.get('security_group_rules', []):
if rule['direction'] == 'ingress':
nova_group['rules'].append(
self._convert_to_nova_security_group_rule_format(rule))
return nova_group
def _convert_to_nova_security_group_rule_format(self, rule):
nova_rule = {}
nova_rule['id'] = rule['id']
nova_rule['parent_group_id'] = rule['security_group_id']
nova_rule['protocol'] = rule['protocol']
if (nova_rule['protocol'] and rule.get('port_range_min') is None and
rule.get('port_range_max') is None):
if nova_rule['protocol'].upper() == 'ICMP':
nova_rule['from_port'] = -1
nova_rule['to_port'] = -1
elif rule['protocol'].upper() in ['TCP', 'UDP']:
nova_rule['from_port'] = 1
nova_rule['to_port'] = 65535
else:
nova_rule['from_port'] = rule.get('port_range_min')
nova_rule['to_port'] = rule.get('port_range_max')
nova_rule['group_id'] = rule['remote_group_id']
nova_rule['cidr'] = self.parse_cidr(rule.get('remote_ip_prefix'))
return nova_rule
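    def _example_rule_conversion(self):
        # Hedged illustration only: a dict mimicking the shape of a neutron
        # ingress rule (all values invented) and the nova-style dict the
        # converter above produces for it.
        neutron_rule = {'id': 'rule-uuid',
                        'security_group_id': 'sg-uuid',
                        'protocol': 'tcp',
                        'port_range_min': 22,
                        'port_range_max': 22,
                        'remote_group_id': None,
                        'remote_ip_prefix': '10.0.0.0/24',
                        'direction': 'ingress'}
        # expected shape: {'id': ..., 'parent_group_id': 'sg-uuid',
        #                  'protocol': 'tcp', 'from_port': 22, 'to_port': 22,
        #                  'group_id': None, 'cidr': <parsed 10.0.0.0/24>}
        return self._convert_to_nova_security_group_rule_format(neutron_rule)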
def get(self, context, name=None, id=None, map_exception=False):
neutron = neutronv2.get_client(context)
try:
if not id and name:
id = neutronv20.find_resourceid_by_name_or_id(
neutron, 'security_group', name)
group = neutron.show_security_group(id).get('security_group')
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
LOG.debug(_("Neutron security group %s not found"), name)
self.raise_not_found(e.message)
else:
LOG.error(_("Neutron Error: %s"), e)
raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_format(group)
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
"""Returns list of security group rules owned by tenant."""
neutron = neutronv2.get_client(context)
search_opts = {}
if names:
search_opts['name'] = names
if ids:
search_opts['id'] = ids
if project:
search_opts['tenant_id'] = project
try:
security_groups = neutron.list_security_groups(**search_opts).get(
'security_groups')
except n_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception(_("Neutron Error getting security groups"))
converted_rules = []
for security_group in security_groups:
converted_rules.append(
self._convert_to_nova_security_group_format(security_group))
return converted_rules
def validate_id(self, id):
if not uuidutils.is_uuid_like(id):
msg = _("Security group id should be uuid")
self.raise_invalid_property(msg)
return id
def destroy(self, context, security_group):
"""This function deletes a security group."""
neutron = neutronv2.get_client(context)
try:
neutron.delete_security_group(security_group['id'])
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
self.raise_not_found(e.message)
elif e.status_code == 409:
self.raise_invalid_property(e.message)
else:
LOG.error(_("Neutron Error: %s"), e)
raise exc_info[0], exc_info[1], exc_info[2]
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both. Multiple rules are
installed to a security group in neutron using bulk support.
"""
neutron = neutronv2.get_client(context)
body = self._make_neutron_security_group_rules_list(vals)
try:
rules = neutron.create_security_group_rule(
body).get('security_group_rules')
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
LOG.exception(_("Neutron Error getting security group %s"),
name)
self.raise_not_found(e.message)
elif e.status_code == 409:
LOG.exception(_("Neutron Error adding rules to security "
"group %s"), name)
self.raise_over_quota(e.message)
else:
LOG.exception(_("Neutron Error:"))
raise exc_info[0], exc_info[1], exc_info[2]
converted_rules = []
for rule in rules:
converted_rules.append(
self._convert_to_nova_security_group_rule_format(rule))
return converted_rules
def _make_neutron_security_group_dict(self, name, description):
return {'security_group': {'name': name,
'description': description}}
def _make_neutron_security_group_rules_list(self, rules):
new_rules = []
for rule in rules:
new_rule = {}
# nova only supports ingress rules so all rules are ingress.
new_rule['direction'] = "ingress"
new_rule['protocol'] = rule.get('protocol')
# FIXME(arosen) Nova does not expose ethertype on security group
# rules. Therefore, in the case of self referential rules we
# should probably assume they want to allow both IPv4 and IPv6.
# Unfortunately, this would require adding two rules in neutron.
# The reason we do not do this is because when the user using the
# nova api wants to remove the rule we'd have to have some way to
# know that we should delete both of these rules in neutron.
# For now, self referential rules only support IPv4.
if not rule.get('cidr'):
new_rule['ethertype'] = 'IPv4'
else:
new_rule['ethertype'] = utils.get_ip_version(rule.get('cidr'))
new_rule['remote_ip_prefix'] = rule.get('cidr')
new_rule['security_group_id'] = rule.get('parent_group_id')
new_rule['remote_group_id'] = rule.get('group_id')
if 'from_port' in rule and rule['from_port'] != -1:
new_rule['port_range_min'] = rule['from_port']
if 'to_port' in rule and rule['to_port'] != -1:
new_rule['port_range_max'] = rule['to_port']
new_rules.append(new_rule)
return {'security_group_rules': new_rules}
def remove_rules(self, context, security_group, rule_ids):
neutron = neutronv2.get_client(context)
rule_ids = set(rule_ids)
try:
# The ec2 api allows one to delete multiple security group rules
# at once. Since there is no bulk delete for neutron the best
# thing we can do is delete the rules one by one and hope this
# works.... :/
for rule_id in range(0, len(rule_ids)):
neutron.delete_security_group_rule(rule_ids.pop())
except n_exc.NeutronClientException as e:
with excutils.save_and_reraise_exception():
LOG.exception(_("Neutron Error unable to delete %s"), rule_ids)
def get_rule(self, context, id):
neutron = neutronv2.get_client(context)
try:
rule = neutron.show_security_group_rule(
id).get('security_group_rule')
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
LOG.debug(_("Neutron security group rule %s not found"), id)
self.raise_not_found(e.message)
else:
LOG.error(_("Neutron Error: %s"), e)
raise exc_info[0], exc_info[1], exc_info[2]
return self._convert_to_nova_security_group_rule_format(rule)
def get_instances_security_groups_bindings(self, context):
"""Returns a dict(instance_id, [security_groups]) to allow obtaining
all of the instances and their security groups in one shot.
"""
neutron = neutronv2.get_client(context)
ports = neutron.list_ports().get('ports')
security_groups = neutron.list_security_groups().get('security_groups')
security_group_lookup = {}
instances_security_group_bindings = {}
for security_group in security_groups:
security_group_lookup[security_group['id']] = security_group
for port in ports:
for port_security_group in port.get('security_groups', []):
try:
sg = security_group_lookup[port_security_group]
# name is optional in neutron so if not specified return id
if sg.get('name'):
sg_entry = {'name': sg['name']}
else:
sg_entry = {'name': sg['id']}
instances_security_group_bindings.setdefault(
port['device_id'], []).append(sg_entry)
except KeyError:
# This should only happen due to a race condition
# if the security group on a port was deleted after the
# ports were returned. We pass since this security
# group is no longer on the port.
pass
return instances_security_group_bindings
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
"""Returns the security groups that are associated with an instance.
If detailed is True then it also returns the full details of the
security groups associated with an instance.
"""
neutron = neutronv2.get_client(context)
params = {'device_id': instance_uuid}
ports = neutron.list_ports(**params)
security_groups = neutron.list_security_groups().get('security_groups')
security_group_lookup = {}
for security_group in security_groups:
security_group_lookup[security_group['id']] = security_group
ret = []
for port in ports['ports']:
for security_group in port.get('security_groups', []):
try:
if detailed:
ret.append(self._convert_to_nova_security_group_format(
security_group_lookup[security_group]))
else:
name = security_group_lookup[security_group].get(
'name')
# the name is optional for neutron
# security groups, so fall back to the id
if not name:
name = security_group
ret.append({'name': name})
except KeyError:
# This should only happen due to a race condition
# if the security group on a port was deleted after the
# ports were returned. We pass since this security
# group is no longer on the port.
pass
return ret
def _has_security_group_requirements(self, port):
port_security_enabled = port.get('port_security_enabled', True)
has_ip = port.get('fixed_ips')
if has_ip:
return port_security_enabled
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
neutron = neutronv2.get_client(context)
try:
security_group_id = neutronv20.find_resourceid_by_name_or_id(
neutron, 'security_group', security_group_name)
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
msg = ("Security group %s is not found for project %s" %
(security_group_name, context.project_id))
self.raise_not_found(msg)
else:
LOG.exception(_("Neutron Error:"))
raise exc_info[0], exc_info[1], exc_info[2]
params = {'device_id': instance['uuid']}
try:
ports = neutron.list_ports(**params).get('ports')
except n_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception(_("Neutron Error:"))
if not ports:
msg = ("instance_id %s could not be found as device id on"
" any ports" % instance['uuid'])
self.raise_not_found(msg)
for port in ports:
if not self._has_security_group_requirements(port):
LOG.warn(_("Cannot add security group %(name)s to %(instance)s"
" since the port %(port_id)s does not meet security"
" requirements"), {'name': security_group_name,
'instance': instance['uuid'], 'port_id': port['id']})
raise exception.SecurityGroupCannotBeApplied()
if 'security_groups' not in port:
port['security_groups'] = []
port['security_groups'].append(security_group_id)
updated_port = {'security_groups': port['security_groups']}
try:
LOG.info(_("Adding security group %(security_group_id)s to "
"port %(port_id)s"),
{'security_group_id': security_group_id,
'port_id': port['id']})
neutron.update_port(port['id'], {'port': updated_port})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Neutron Error:"))
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
neutron = neutronv2.get_client(context)
try:
security_group_id = neutronv20.find_resourceid_by_name_or_id(
neutron, 'security_group', security_group_name)
except n_exc.NeutronClientException as e:
exc_info = sys.exc_info()
if e.status_code == 404:
msg = ("Security group %s is not found for project %s" %
(security_group_name, context.project_id))
self.raise_not_found(msg)
else:
LOG.exception(_("Neutron Error:"))
raise exc_info[0], exc_info[1], exc_info[2]
params = {'device_id': instance['uuid']}
try:
ports = neutron.list_ports(**params).get('ports')
except n_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
LOG.exception(_("Neutron Error:"))
if not ports:
msg = ("instance_id %s could not be found as device id on"
" any ports" % instance['uuid'])
self.raise_not_found(msg)
found_security_group = False
for port in ports:
try:
port.get('security_groups', []).remove(security_group_id)
except ValueError:
# When removing a security group from an instance the security
# group should be on both ports since it was added this way if
# done through the nova api. In case it is not, a 404 is only
# raised if the security group is not found on any of the
# ports on the instance.
continue
updated_port = {'security_groups': port['security_groups']}
try:
LOG.info(_("Adding security group %(security_group_id)s to "
"port %(port_id)s"),
{'security_group_id': security_group_id,
'port_id': port['id']})
neutron.update_port(port['id'], {'port': updated_port})
found_security_group = True
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Neutron Error:"))
if not found_security_group:
msg = (_("Security group %(security_group_name)s not assocaited "
"with the instance %(instance)s"),
{'security_group_name': security_group_name,
'instance': instance['uuid']})
self.raise_not_found(msg)
def populate_security_groups(self, instance, security_groups):
# Setting to empty list since we do not want to populate this field
# in the nova database if using the neutron driver
instance['security_groups'] = security_group.SecurityGroupList()
instance['security_groups'].objects = []
| apache-2.0 |
steebchen/youtube-dl | devscripts/prepare_manpage.py | 28 | 2188 | from __future__ import unicode_literals
import io
import optparse
import os.path
import re
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
README_FILE = os.path.join(ROOT_DIR, 'README.md')
PREFIX = r'''%YOUTUBE-DL(1)
# NAME
youtube\-dl \- download videos from youtube.com or other video platforms
# SYNOPSIS
**youtube-dl** \[OPTIONS\] URL [URL...]
'''
def main():
parser = optparse.OptionParser(usage='%prog OUTFILE.md')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Expected an output filename')
outfile, = args
with io.open(README_FILE, encoding='utf-8') as f:
readme = f.read()
readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
readme = PREFIX + readme
readme = filter_options(readme)
with io.open(outfile, 'w', encoding='utf-8') as outf:
outf.write(readme)
def filter_options(readme):
ret = ''
in_options = False
for line in readme.split('\n'):
if line.startswith('# '):
if line[2:].startswith('OPTIONS'):
in_options = True
else:
in_options = False
if in_options:
if line.lstrip().startswith('-'):
split = re.split(r'\s{2,}', line.lstrip())
# Description string may start with `-` as well. If there is
# only one piece then it's a description bit, not an option.
if len(split) > 1:
option, description = split
split_option = option.split(' ')
if not split_option[-1].startswith('-'): # metavar
option = ' '.join(split_option[:-1] + ['*%s*' % split_option[-1]])
# Pandoc's definition_lists. See http://pandoc.org/README.html
# for more information.
ret += '\n%s\n: %s\n' % (option, description)
continue
ret += line.lstrip() + '\n'
else:
ret += line + '\n'
return ret
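# Illustrative transformation performed by filter_options (the README line
# below is invented): an option line such as
#
#     -f, --format FORMAT          Video format code
#
# is split on runs of two or more spaces, the metavar is emphasised, and the
# pair is emitted as a Pandoc definition-list entry:
#
#     -f, --format *FORMAT*
#     : Video format code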
if __name__ == '__main__':
main()
| unlicense |
glowka/django-getpaid | getpaid/models.py | 3 | 5550 | import sys
from datetime import datetime
from django.apps import apps
from django.db import models
from django.utils import six
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from .abstract_mixin import AbstractMixin
from getpaid import signals
from .utils import import_backend_modules
from django.conf import settings
if six.PY3:
unicode = str
PAYMENT_STATUS_CHOICES = (
('new', _("new")),
('in_progress', _("in progress")),
('accepted_for_proc', _("accepted for processing")),
('partially_paid', _("partially paid")),
('paid', _("paid")),
('cancelled', _("cancelled")),
('failed', _("failed")),
)
class PaymentManager(models.Manager):
def get_queryset(self):
return super(PaymentManager, self).get_queryset().select_related('order')
@python_2_unicode_compatible
class PaymentFactory(models.Model, AbstractMixin):
"""
This is an abstract class that defines a structure of Payment model that will be
generated dynamically with one additional field: ``order``
"""
amount = models.DecimalField(_("amount"), decimal_places=4, max_digits=20)
currency = models.CharField(_("currency"), max_length=3)
status = models.CharField(_("status"), max_length=20, choices=PAYMENT_STATUS_CHOICES, default='new', db_index=True)
backend = models.CharField(_("backend"), max_length=50)
created_on = models.DateTimeField(_("created on"), auto_now_add=True, db_index=True)
paid_on = models.DateTimeField(_("paid on"), blank=True, null=True, default=None, db_index=True)
amount_paid = models.DecimalField(_("amount paid"), decimal_places=4, max_digits=20, default=0)
external_id = models.CharField(_("external id"), max_length=64, blank=True, null=True)
description = models.CharField(_("description"), max_length=128, blank=True, null=True)
class Meta:
abstract = True
def __str__(self):
return _("Payment #%(id)d") % {'id': self.id}
@classmethod
def contribute(cls, order, **kwargs):
return {'order': models.ForeignKey(order, **kwargs)}
@classmethod
def create(cls, order, backend):
"""
Builds Payment object based on given Order instance
"""
payment = Payment()
payment.order = order
payment.backend = backend
signals.new_payment_query.send(sender=None, order=order, payment=payment)
if payment.currency is None or payment.amount is None:
raise NotImplementedError('Please provide a listener for getpaid.signals.new_payment_query')
payment.save()
signals.new_payment.send(sender=None, order=order, payment=payment)
return payment
def get_processor(self):
try:
__import__(self.backend)
module = sys.modules[self.backend]
return module.PaymentProcessor
except (ImportError, AttributeError):
raise ValueError("Backend '%s' is not available or provides no processor." % self.backend)
def change_status(self, new_status):
"""
Always change payment status via this method. Otherwise the signal
will not be emitted.
"""
if self.status != new_status:
# do anything only when status is really changed
old_status = self.status
self.status = new_status
self.save()
signals.payment_status_changed.send(
sender=type(self), instance=self,
old_status=old_status, new_status=new_status
)
def on_success(self, amount=None):
"""
Called when payment receives successful balance income. It defaults to
complete payment, but can optionally accept received amount as a parameter
to handle partial payments.
Returns True if the payment was fully paid, False otherwise.
"""
if getattr(settings, 'USE_TZ', False):
self.paid_on = datetime.utcnow().replace(tzinfo=utc)
else:
self.paid_on = datetime.now()
if amount:
self.amount_paid = amount
else:
self.amount_paid = self.amount
fully_paid = (self.amount_paid >= self.amount)
if fully_paid:
self.change_status('paid')
else:
self.change_status('partially_paid')
return fully_paid
def on_failure(self):
"""
Called when payment was failed
"""
self.change_status('failed')
def register_to_payment(order_class, **kwargs):
"""
A function for registering unaware order class to ``getpaid``. This will
generate a ``Payment`` model class that will store payments with
ForeignKey to original order class
This also will build a model class for every enabled backend.
"""
global Payment
global Order
class Payment(PaymentFactory.construct(order=order_class, **kwargs)):
objects = PaymentManager()
class Meta:
ordering = ('-created_on',)
verbose_name = _("Payment")
verbose_name_plural = _("Payments")
Order = order_class
# Now build models for backends
backend_models_modules = import_backend_modules('models')
for backend_name, models_module in backend_models_modules.items():
for model in models_module.build_models(Payment):
apps.register_model(backend_name, model)
return Payment
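# Hedged usage sketch (the ``Order`` model and kwargs are illustrative): in a
# project's models.py one might write
#
#     from getpaid.models import register_to_payment
#     Payment = register_to_payment(Order, related_name='payments')
#
# after which each Order instance exposes a ``payments`` reverse relation and
# one model is registered per enabled backend.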
| mit |
danilito19/django | tests/flatpages_tests/test_middleware.py | 290 | 8134 | from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareTests(TestDataMixin, TestCase):
def test_view_flatpage(self):
"A flatpage can be served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', 'test@example.com', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_fallback_authenticated_flatpage(self):
"A flatpage served by the middleware can require authentication"
response = self.client.get('/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/sekrit/')
User.objects.create_user('testuser', 'test@example.com', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
APPEND_SLASH=True,
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareAppendSlashTests(TestDataMixin, TestCase):
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get('/flatpage_root/flatpage')
self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view and should not add a slash"
response = self.client.get('/flatpage_root/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware and should add a slash"
response = self.client.get('/flatpage')
self.assertRedirects(response, '/flatpage/', status_code=301)
def test_redirect_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware and should not add a slash"
response = self.client.get('/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware and should add a slash"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here')
self.assertRedirects(response, '/some.very_special~chars-here/', status_code=301)
def test_redirect_fallback_flatpage_root(self):
"A flatpage at / should not cause a redirect loop when APPEND_SLASH is set"
fp = FlatPage.objects.create(
url="/",
title="Root",
content="Root",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Root</p>")
| bsd-3-clause |
direvus/ansible | test/legacy/cleanup_rax.py | 160 | 6547 | #!/usr/bin/env python
import os
import re
import yaml
import argparse
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.six.moves import input
def rax_list_iterator(svc, *args, **kwargs):
    '''Page through a marker-based pyrax listing, resuming each request from
    the id of the last item seen until a page comes back (nearly) empty.'''
    method = kwargs.pop('method', 'list')
    items = getattr(svc, method)(*args, **kwargs)
    while items:
        retrieved = getattr(svc, method)(*args, marker=items[-1].id, **kwargs)
        # the marker item is usually echoed back as the first element of the
        # next page; drop our copy so it is only kept once
        if items and retrieved and items[-1].id == retrieved[0].id:
            del items[-1]
        items.extend(retrieved)
        if len(retrieved) < 2:
            break
    return items
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-y', '--yes', action='store_true', dest='assumeyes',
default=False, help="Don't prompt for confirmation")
parser.add_argument('--match', dest='match_re',
default='^ansible-testing',
help='Regular expression used to find resources '
'(default: %(default)s)')
return parser.parse_args()
def authenticate():
try:
with open(os.path.realpath('./credentials.yml')) as f:
credentials = yaml.load(f)
except Exception as e:
raise SystemExit(e)
try:
pyrax.set_credentials(credentials.get('rackspace_username'),
credentials.get('rackspace_api_key'))
except Exception as e:
raise SystemExit(e)
def prompt_and_delete(item, prompt, assumeyes):
if not assumeyes:
assumeyes = input(prompt).lower() == 'y'
assert hasattr(item, 'delete') or hasattr(item, 'terminate'), \
"Class <%s> has no delete or terminate attribute" % item.__class__
if assumeyes:
if hasattr(item, 'delete'):
item.delete()
print("Deleted %s" % item)
if hasattr(item, 'terminate'):
item.terminate()
print("Terminated %s" % item)
def delete_rax(args):
"""Function for deleting CloudServers"""
print("--- Cleaning CloudServers matching '%s'" % args.match_re)
search_opts = dict(name='^%s' % args.match_re)
for region in pyrax.identity.services.compute.regions:
cs = pyrax.connect_to_cloudservers(region=region)
servers = rax_list_iterator(cs.servers, search_opts=search_opts)
for server in servers:
prompt_and_delete(server,
'Delete matching %s? [y/n]: ' % server,
args.assumeyes)
def delete_rax_clb(args):
"""Function for deleting Cloud Load Balancers"""
print("--- Cleaning Cloud Load Balancers matching '%s'" % args.match_re)
for region in pyrax.identity.services.load_balancer.regions:
clb = pyrax.connect_to_cloud_loadbalancers(region=region)
for lb in rax_list_iterator(clb):
if re.search(args.match_re, lb.name):
prompt_and_delete(lb,
'Delete matching %s? [y/n]: ' % lb,
args.assumeyes)
def delete_rax_keypair(args):
"""Function for deleting Rackspace Key pairs"""
print("--- Cleaning Key Pairs matching '%s'" % args.match_re)
for region in pyrax.identity.services.compute.regions:
cs = pyrax.connect_to_cloudservers(region=region)
for keypair in cs.keypairs.list():
if re.search(args.match_re, keypair.name):
prompt_and_delete(keypair,
'Delete matching %s? [y/n]: ' % keypair,
args.assumeyes)
def delete_rax_network(args):
"""Function for deleting Cloud Networks"""
print("--- Cleaning Cloud Networks matching '%s'" % args.match_re)
for region in pyrax.identity.services.network.regions:
cnw = pyrax.connect_to_cloud_networks(region=region)
for network in cnw.list():
if re.search(args.match_re, network.name):
prompt_and_delete(network,
'Delete matching %s? [y/n]: ' % network,
args.assumeyes)
def delete_rax_cbs(args):
"""Function for deleting Cloud Networks"""
print("--- Cleaning Cloud Block Storage matching '%s'" % args.match_re)
for region in pyrax.identity.services.network.regions:
cbs = pyrax.connect_to_cloud_blockstorage(region=region)
for volume in cbs.list():
if re.search(args.match_re, volume.name):
prompt_and_delete(volume,
'Delete matching %s? [y/n]: ' % volume,
args.assumeyes)
def delete_rax_cdb(args):
"""Function for deleting Cloud Databases"""
print("--- Cleaning Cloud Databases matching '%s'" % args.match_re)
for region in pyrax.identity.services.database.regions:
cdb = pyrax.connect_to_cloud_databases(region=region)
for db in rax_list_iterator(cdb):
if re.search(args.match_re, db.name):
prompt_and_delete(db,
'Delete matching %s? [y/n]: ' % db,
args.assumeyes)
def _force_delete_rax_scaling_group(manager):
def wrapped(uri):
manager.api.method_delete('%s?force=true' % uri)
return wrapped
def delete_rax_scaling_group(args):
"""Function for deleting Autoscale Groups"""
print("--- Cleaning Autoscale Groups matching '%s'" % args.match_re)
for region in pyrax.identity.services.autoscale.regions:
asg = pyrax.connect_to_autoscale(region=region)
for group in rax_list_iterator(asg):
if re.search(args.match_re, group.name):
group.manager._delete = \
_force_delete_rax_scaling_group(group.manager)
prompt_and_delete(group,
'Delete matching %s? [y/n]: ' % group,
args.assumeyes)
def main():
if not HAS_PYRAX:
raise SystemExit('The pyrax python module is required for this script')
args = parse_args()
authenticate()
funcs = [f for n, f in globals().items() if n.startswith('delete_rax')]
for func in sorted(funcs, key=lambda f: f.__name__):
try:
func(args)
except Exception as e:
print("---- %s failed (%s)" % (func.__name__, e.message))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nExiting...')
| gpl-3.0 |
jhaals/ansible-modules-core | utilities/logic/pause.py | 37 | 1494 | # -*- mode: python -*-
DOCUMENTATION = '''
---
module: pause
short_description: Pause playbook execution
description:
- Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to pause with a prompt.
- "You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early: press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a)."
- "The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts."
version_added: "0.8"
options:
minutes:
description:
- Number of minutes to pause for.
required: false
default: null
seconds:
description:
- Number of seconds to pause for.
required: false
default: null
prompt:
description:
- Optional text to use for the prompt message.
required: false
default: null
author: "Tim Bielawa (@tbielawa)"
'''
EXAMPLES = '''
# Pause for 5 minutes to build app cache.
- pause: minutes=5
# Pause until you can verify updates to an application were successful.
- pause:
# A helpful reminder of what to look out for post-update.
- pause: prompt="Make sure org.foo.FooOverload exception is not present"
'''
| gpl-3.0 |
raj-bhatia/grooveip-ios-public | submodules/externals/antlr3/runtime/Python/antlr3/constants.py | 123 | 2111 | """ANTLR3 runtime package"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
EOF = -1
## All tokens go to the parser (unless skip() is called in that rule)
# on a particular "channel". The parser tunes to a particular channel
# so that whitespace etc... can go to the parser on a "hidden" channel.
DEFAULT_CHANNEL = 0
## Anything on a channel other than DEFAULT_CHANNEL is not parsed
# by the parser.
HIDDEN_CHANNEL = 99
# Predefined token types
EOR_TOKEN_TYPE = 1
##
# imaginary tree navigation type; traverse "get child" link
DOWN = 2
##
#imaginary tree navigation type; finish with a child list
UP = 3
MIN_TOKEN_TYPE = UP+1
INVALID_TOKEN_TYPE = 0
| gpl-2.0 |
tkaitchuck/nupic | external/common/lib/python2.6/site-packages/logilab/common/test/unittest_changelog.py | 3 | 1448 | # copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
from os.path import join, dirname
from cStringIO import StringIO
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.changelog import ChangeLog
class ChangeLogTC(TestCase):
cl_class = ChangeLog
cl_file = join(dirname(__file__), 'data', 'ChangeLog')
def test_round_trip(self):
cl = self.cl_class(self.cl_file)
out = StringIO()
cl.write(out)
with open(self.cl_file) as stream:
self.assertMultiLineEqual(stream.read(), out.getvalue())
if __name__ == '__main__':
unittest_main()
| gpl-3.0 |
hrjn/scikit-learn | sklearn/model_selection/_split.py | 7 | 68700 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav RV <rvraghav93@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..utils.random import choice
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'RepeatedStratifiedKFold',
'RepeatedKFold',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
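# --- Illustrative sketch, not part of the original module: a minimal
# concrete cross-validator only needs _iter_test_indices (or
# _iter_test_masks) and get_n_splits; split() and __repr__ are inherited
# from BaseCrossValidator above.
class _EveryOtherSample(BaseCrossValidator):
    """Toy splitter: even-indexed, then odd-indexed, samples as test sets."""
    def _iter_test_indices(self, X=None, y=None, groups=None):
        n_samples = _num_samples(X)
        yield np.arange(0, n_samples, 2)
        yield np.arange(1, n_samples, 2)
    def get_n_splits(self, X=None, y=None, groups=None):
        return 2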
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations, which grows combinatorially with the
number of samples, this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
Takes class information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
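# --- Illustrative sketch of the fold-size rule in the Notes above: with
# n_samples=10 and n_splits=3, fold_sizes starts as [3, 3, 3] and the first
# 10 % 3 == 1 fold gets one extra sample, giving folds of sizes [4, 3, 3].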
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
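# --- Illustrative standalone sketch, not part of the original module, of the
# greedy balancing used by GroupKFold above: sort groups by size (largest
# first) and assign each group to the currently lightest fold.
def _greedy_group_assignment(n_samples_per_group, n_splits):
    order = np.argsort(n_samples_per_group)[::-1]
    fold_weights = np.zeros(n_splits)
    assignment = np.zeros(len(n_samples_per_group), dtype=int)
    for group_index in order:
        lightest_fold = np.argmin(fold_weights)
        fold_weights[lightest_fold] += n_samples_per_group[group_index]
        assignment[group_index] = lightest_fold
    return assignment
# e.g. _greedy_group_assignment(np.array([5, 3, 2, 2]), 2) -> array([0, 1, 0, 1])
# leaving fold weights [7, 5].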
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size ``trunc(n_samples / n_splits)``; the last one has
the complementary size.
See also
--------
RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
# So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedKFold, self).split(X, y, groups)
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
In each split, test indices must be higher than before, and thus shuffling
in the cross-validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns the first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> print(logo)
LeaveOneGroupOut()
>>> for train_index, test_index in logo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the groups while the latter uses the samples
all assigned to a single group.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpgo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1))
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
X, y, groups = indexable(X, y, groups)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
class _RepeatedSplits(with_metaclass(ABCMeta)):
"""Repeated splits for an arbitrary randomized CV splitter.
Repeats splits for cross-validators n times with different randomization
in each repetition.
Parameters
----------
cv : callable
Cross-validator class.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
**cvargs : additional params
Constructor parameters for cv. Must not contain random_state
and shuffle.
"""
def __init__(self, cv, n_repeats=10, random_state=None, **cvargs):
if not isinstance(n_repeats, (np.integer, numbers.Integral)):
raise ValueError("Number of repetitions must be of Integral type.")
if n_repeats <= 1:
raise ValueError("Number of repetitions must be greater than 1.")
if any(key in cvargs for key in ('random_state', 'shuffle')):
raise ValueError(
"cvargs must not contain random_state or shuffle.")
self.cv = cv
self.n_repeats = n_repeats
self.random_state = random_state
self.cvargs = cvargs
def split(self, X, y=None, groups=None):
"""Generates indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True,
**self.cvargs)
for train_index, test_index in cv.split(X, y, groups):
yield train_index, test_index
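# --- Illustrative sketch: RepeatedKFold below is essentially this wrapper
# applied to KFold; any splitter that accepts `shuffle` and `random_state`
# can be repeated the same way, e.g.
#     rcv = _RepeatedSplits(KFold, n_repeats=2, random_state=0, n_splits=5)
#     for train_index, test_index in rcv.split(X):
#         ...  # yields 2 * 5 = 10 (train, test) pairs in total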
class RepeatedKFold(_RepeatedSplits):
"""Repeated K-Fold cross validator.
Repeats K-Fold n times with different randomization in each repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from sklearn.model_selection import RepeatedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124)
>>> for train_index, test_index in rkf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [0 1] TEST: [2 3]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
See also
--------
RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedKFold, self).__init__(
KFold, n_repeats, random_state, n_splits=n_splits)
class RepeatedStratifiedKFold(_RepeatedSplits):
"""Repeated Stratified K-Fold cross validator.
Repeats Stratified K-Fold n times with different randomization in each
repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from sklearn.model_selection import RepeatedStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2,
... random_state=36851234)
>>> for train_index, test_index in rskf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
See also
--------
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedStratifiedKFold, self).__init__(
StratifiedKFold, n_repeats, random_state, n_splits=n_splits)
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class GroupShuffleSplit(ShuffleSplit):
'''Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups,
not to samples as they do in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the test split. If
int, represents the absolute number of test groups. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
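# --- Illustrative usage sketch for GroupShuffleSplit (not a doctest from the
# original module); note that test_size counts groups, not samples:
#     X = np.array([[1], [2], [3], [4]])
#     groups = np.array(['a', 'a', 'b', 'b'])
#     gss = GroupShuffleSplit(n_splits=2, test_size=0.5, random_state=0)
#     for train_index, test_index in gss.split(X, groups=groups):
#         ...  # each test set contains every sample of exactly one group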
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
It is the most likely outcome of drawing n_draws samples
from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
NOTE This does not take into account the number of samples, which is known
only at split time
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
Validation helper to check if the train/test sizes are meaningful with
respect to the size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
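# --- Worked example of the resolution rule above: with n_samples=10,
# test_size=0.25 and train_size=None,
#     n_test  = ceil(0.25 * 10) = 3
#     n_train = 10 - 3          = 7
# so _validate_shuffle_split(10, 0.25, None) returns (7, 3).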
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
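# --- Illustrative sketch: any iterable of (train, test) index pairs can be
# wrapped to expose the split/get_n_splits interface, e.g.
#     custom_cv = _CVIterableWrapper([(np.array([0, 1]), np.array([2])),
#                                     (np.array([0, 2]), np.array([1]))])
#     custom_cv.get_n_splits()  # -> 2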
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
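# --- Illustrative sketch of the dispatch in check_cv above:
#     check_cv(5)                                # -> KFold(n_splits=5)
#     check_cv(5, y=np.array([0, 1, 0, 1]),
#              classifier=True)                  # -> StratifiedKFold(n_splits=5)
#     check_cv([(np.array([0]), np.array([1]))]) # -> _CVIterableWrapper(...)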
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/examples/benchmark/sample_benchmark.py | 94 | 1605 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sample TensorFlow benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
# Define a class that extends from tf.test.Benchmark.
class SampleBenchmark(tf.test.Benchmark):
# Note: benchmark method name must start with `benchmark`.
def benchmarkSum(self):
with tf.Session() as sess:
x = tf.constant(10)
y = tf.constant(5)
result = tf.add(x, y)
iters = 100
start_time = time.time()
for _ in range(iters):
sess.run(result)
total_wall_time = time.time() - start_time
# Call report_benchmark to report a metric value.
self.report_benchmark(
name="sum_wall_time",
# This value should always be per iteration.
wall_time=total_wall_time/iters,
iters=iters)
if __name__ == "__main__":
tf.test.main()
| mit |
bbc/kamaelia | Sketches/MH/DocGen/renderHTML.py | 9 | 8546 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
========================
Doctree to HTML Renderer
========================
Renderer for converting docutils document trees to HTML output with Kamaelia
website-specific directives, and automatic links for certain text patterns.
"""
import textwrap
import inspect
import pprint
import time
from docutils import core
from docutils import nodes
import docutils
import re
class RenderHTML(object):
"""\
RenderHTML([debug][,titlePrefix][,urlPrefix][,rawFooter]) -> new RenderHTML object
Renders docutils document trees to HTML with Kamaelia website-specific
directives.
Also contains helper functions for determining filenames and URIs for
documents.
Keyword arguments::
- debug -- Optional. True for debugging mode - currently does nothing (default=False)
- titlePrefix -- Optional. Prefix for the HTML <head><title> (default="")
- urlPrefix -- Optional. Prefix for all URLs. Should include a trailing slash if needed (default="")
- rawFooter -- Optional. Footer text that will be appended just before the </body></html> tags (default="")
"""
def __init__(self, debug=False, titlePrefix="", urlPrefix="",rawFooter=""):
super(RenderHTML,self).__init__()
self.titlePrefix=titlePrefix
self.debug=debug
self.urlPrefix=urlPrefix
self.rawFooter=rawFooter
self.mappings={}
def makeFilename(self, docName):
"""\
Returns the file name for a given document name.
Eg. "Kamaelia.Chassis" will be mapped to something like "Kamaelia.Chassis.html"
"""
return docName + ".html"
def makeURI(self, docName,internalRef=None):
"""\
Returns the URI for a given document name. Takes into account the url prefix.
Eg. "Kamaelia.Chassis" will be mapped to something like "/mydocs/Kamaelia.Chassis.html"
"""
if internalRef is not None:
suffix="#"+internalRef
else:
suffix=""
return self.urlPrefix+self.makeFilename(docName)+suffix
def setAutoCrossLinks(self, mappings):
"""\
Set mapping for the automagic generation of hyperlinks between content.
Supply a dict of mappings mapping patterns (strings) to the fully qualified
entity name to be linked to.
"""
self.mappings = {}
for (key,ref) in mappings.items():
# compile as an RE - detects the pattern provided nothing precedes it,
# and it is not part of a larger pattern, eg A.B is part of A.B.C
pattern=re.compile("(?<![a-zA-Z0-9._])"+re.escape(key)+"(?!\.?[a-zA-Z0-9_])")
# convert the destination to a URI
uri = self.makeURI(ref)
self.mappings[pattern] = uri
def addAutoLinksToURI(self, mappings):
for (key,uri) in mappings.items():
pattern=re.compile("(?<![a-zA-Z0-9._])"+re.escape(key)+"(?!\.?[a-zA-Z0-9_])")
self.mappings[pattern] = uri
def render(self, docName, docTree):
"""\
Render the named document tree as HTML with Kamaelia website-specific directives.
Returns string containing the entire HTML document.
"""
if not isinstance(docTree, nodes.document):
root = core.publish_doctree('')
root.append(docTree)
docTree = root
docTree.attributes['title']=docName
# do this first, before we turn the boxright nodes into "[ [boxright] ... ]"
docTree.transformer.add_transform(squareBracketReplace_transform)
docTree.transformer.apply_transforms()
docTree.transformer.add_transform(boxright_transform)
docTree.transformer.add_transform(crosslink_transform, priority=None, mappings=self.mappings)
docTree.transformer.apply_transforms()
reader = docutils.readers.doctree.Reader(parser_name='null')
pub = core.Publisher(reader, None, None, source=docutils.io.DocTreeInput(docTree),
destination_class=docutils.io.StringOutput)
pub.set_writer("html")
output = pub.publish(enable_exit_status=None)
parts = pub.writer.parts
doc = parts["html_title"] \
+ parts["html_subtitle"] \
+ parts["docinfo"] \
+ parts["fragment"]
wholedoc = self.headers(docTree) + doc + self.footers(docTree)
return wholedoc
def headers(self,doc):
title = self.titlePrefix + doc.attributes['title']
return """\
<html>
<head>
<title>"""+title+"""</title>
<style type="text/css">
pre.literal-block, pre.doctest-block {
margin-left: 2em ;
margin-right: 2em ;
background-color: #eeeeee }
</style>
</head>
<body>
"""
def footers(self,doc):
return self.rawFooter+"</body></html>\n"
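# Hedged usage sketch (assumption, not part of the original module): wiring the
# renderer together with automatic cross-linking. `tree` stands in for a
# docutils document tree produced elsewhere.
#
#   renderer = RenderHTML(titlePrefix="Kamaelia docs: ", urlPrefix="/mydocs/")
#   renderer.setAutoCrossLinks({"Kamaelia.Chassis": "Kamaelia.Chassis"})
#   html = renderer.render("Kamaelia.Chassis", tree)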
from Nodes import boxright
class boxright_transform(docutils.transforms.Transform):
"""\
Transform that replaces boxright nodes with the corresponding Kamaelia
website [[boxright] <child node content> ] directive
"""
default_priority=100
def apply(self):
boxes=[]
for target in self.document.traverse(boxright):
target.insert(0, nodes.Text("[[boxright] "))
target.append(nodes.Text("]"))
boxes.append(target)
for box in boxes:
box.replace_self( nodes.container('', *box.children) )
class crosslink_transform(docutils.transforms.Transform):
"""\
Transform that searches text in the document for any of the patterns in the
supplied set of mappings. If a pattern is found it is converted to a
hyperlink
"""
default_priority=100
def apply(self, mappings):
self.mappings = mappings
self.recurse(self.document)
def recurse(self, parent):
i=0
while i<len(parent.children):
thisNode = parent[i]
if isinstance(thisNode, nodes.Text):
changeMade = self.crosslink(parent, i)
if not changeMade:
i=i+1
else:
if isinstance(thisNode, (nodes.reference,)): # nodes.literal_block)):
pass
elif thisNode.children:
self.recurse(thisNode)
i=i+1
def crosslink(self, parent, i):
text = parent[i].astext()
for pattern in self.mappings.keys():
match = pattern.search(text)
if match:
head = text[:match.start()]
tail = text[match.end():]
middle = text[match.start():match.end()]
URI = self.mappings[pattern]
parent.remove(parent[i])
if tail:
parent.insert(i, nodes.Text(tail))
if middle:
parent.insert(i, nodes.reference('', nodes.Text(middle), refuri=URI))
if head:
parent.insert(i, nodes.Text(head))
return True
return False
class squareBracketReplace_transform(docutils.transforms.Transform):
"""\
Transform that replaces square brackets in text with escape codes, so that
the Kamaelia website doesn't interpret them as directives
"""
default_priority=100
def apply(self):
for target in self.document.traverse(nodes.Text):
newText = target.replace("[","%91%")
newText = newText.replace("]","%93%")
target.parent.replace(target, newText)
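# Illustrative note (assumption): with the replacements above, a text node such
# as "see [1]" becomes "see %91%1%93%", so the website's directive parser never
# sees raw square brackets; boxright_transform then re-inserts its literal
# "[[boxright] ... ]" markers afterwards, which is why render() applies the
# escaping transform first.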
| apache-2.0 |
ericd/redeem | redeem/path_planner/setup.py | 2 | 1206 | #!/usr/bin/env python
from distutils.core import setup, Extension
import os
from distutils.sysconfig import get_config_vars
(opt,) = get_config_vars('OPT')
os.environ['OPT'] = " ".join(
flag for flag in opt.split() if flag != '-Wstrict-prototypes'
)
pathplanner = Extension('_PathPlannerNative',
sources = ['PathPlannerNative.i',
'PathPlanner.cpp',
'PathPlannerSetup.cpp',
'Preprocessor.cpp',
'Path.cpp',
'Delta.cpp',
'vector3.cpp',
'PruTimer.cpp',
'prussdrv.c',
'Logger.cpp'],
swig_opts=['-c++','-builtin'],
extra_compile_args = [
'-std=c++0x',
'-g',
'-Ofast',
'-fpermissive',
'-D_GLIBCXX_USE_NANOSLEEP',
'-DBUILD_PYTHON_EXT=1',
'-Wno-write-strings',
'-Wno-maybe-uninitialized',
'-Wno-format',
'-DDEBUG=1'])
setup(name='PathPlannerNative',
version='1.0',
description='PathPlanner for 3D printer',
author='Mathieu Monney',
author_email='zittix@xwaves.net',
url='http://www.xwaves.net',
ext_modules = [pathplanner],
)
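# Hedged usage note (assumption): building in place with SWIG available on the
# PATH, e.g.
#   python setup.py build_ext --inplace
# first runs SWIG on PathPlannerNative.i (generating C++ wrapper code) and then
# compiles the listed sources into the _PathPlannerNative extension module.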
| gpl-3.0 |
NaturalGIS/QGIS | tests/src/python/test_qgsvectorfilewriter.py | 3 | 49036 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVectorFileWriter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import next
from builtins import str
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsField,
QgsGeometry,
QgsPointXY,
QgsCoordinateReferenceSystem,
QgsVectorFileWriter,
QgsFeatureRequest,
QgsProject,
QgsWkbTypes,
QgsRectangle,
QgsCoordinateTransform
)
from qgis.PyQt.QtCore import QDate, QTime, QDateTime, QVariant, QDir, QByteArray
import os
import tempfile
import osgeo.gdal # NOQA
from osgeo import gdal, ogr
from qgis.testing import start_app, unittest
from utilities import writeShape, compareWkt, unitTestDataPath
TEST_DATA_DIR = unitTestDataPath()
start_app()
def GDAL_COMPUTE_VERSION(maj, min, rev):
return ((maj) * 1000000 + (min) * 10000 + (rev) * 100)
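# Illustrative example (not in the original file): GDAL_COMPUTE_VERSION(2, 4, 0)
# evaluates to 2040000, directly comparable with int(gdal.VersionInfo('VERSION_NUM')).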
class TestFieldValueConverter(QgsVectorFileWriter.FieldValueConverter):
def __init__(self, layer):
QgsVectorFileWriter.FieldValueConverter.__init__(self)
self.layer = layer
def fieldDefinition(self, field):
idx = self.layer.fields().indexFromName(field.name())
if idx == 0:
return self.layer.fields()[idx]
elif idx == 2:
return QgsField('conv_attr', QVariant.String)
return QgsField('unexpected_idx')
def convert(self, idx, value):
if idx == 0:
return value
elif idx == 2:
if value == 3:
return 'converted_val'
else:
return 'unexpected_val!'
return 'unexpected_idx'
class TestQgsVectorFileWriter(unittest.TestCase):
mMemoryLayer = None
def testWrite(self):
"""Check we can write a vector file."""
self.mMemoryLayer = QgsVectorLayer(
('Point?crs=epsg:4326&field=name:string(20)&'
'field=age:integer&field=size:double&index=yes'),
'test',
'memory')
self.assertIsNotNone(self.mMemoryLayer, 'Provider not initialized')
myProvider = self.mMemoryLayer.dataProvider()
self.assertIsNotNone(myProvider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes(['Johny', 20, 0.3])
myResult, myFeatures = myProvider.addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
writeShape(self.mMemoryLayer, 'writetest.shp')
def testWriteWithLongLongField(self):
ml = QgsVectorLayer('NoGeometry?crs=epsg:4326&field=fldlonglong:long',
'test2', 'memory')
provider = ml.dataProvider()
feat = QgsFeature()
feat.setAttributes([2262000000])
provider.addFeatures([feat])
filename = os.path.join(str(QDir.tempPath()), 'with_longlong_field')
crs = QgsCoordinateReferenceSystem('EPSG:4326')
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(ml, filename, 'utf-8', crs, 'GPKG')
# open the resulting geopackage
vl = QgsVectorLayer(filename + '.gpkg', '', 'ogr')
self.assertTrue(vl.isValid())
# test values
idx = vl.fields().indexFromName('fldlonglong')
self.assertEqual(vl.getFeature(1).attributes()[idx], 2262000000)
del vl
os.unlink(filename + '.gpkg')
def testWriteWithBoolField(self):
# init connection string
dbconn = 'service=qgis_test'
if 'QGIS_PGTEST_DB' in os.environ:
dbconn = os.environ['QGIS_PGTEST_DB']
# create a vector layer
vl = QgsVectorLayer('{} table="qgis_test"."boolean_table" sql='.format(dbconn), "testbool", "postgres")
self.assertTrue(vl.isValid())
# check that 1 of its fields is a bool
fields = vl.fields()
self.assertEqual(fields.at(fields.indexFromName('fld1')).type(), QVariant.Bool)
# write a gpkg package with a bool field
crs = QgsCoordinateReferenceSystem('EPSG:4326')
filename = os.path.join(str(QDir.tempPath()), 'with_bool_field')
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(vl,
filename,
'utf-8',
crs,
'GPKG')
self.assertEqual(rc, QgsVectorFileWriter.NoError)
# open the resulting geopackage
vl = QgsVectorLayer(filename + '.gpkg', '', 'ogr')
self.assertTrue(vl.isValid())
fields = vl.fields()
# test type of converted field
idx = fields.indexFromName('fld1')
self.assertEqual(fields.at(idx).type(), QVariant.Bool)
# test values
self.assertEqual(vl.getFeature(1).attributes()[idx], 1)
self.assertEqual(vl.getFeature(2).attributes()[idx], 0)
del vl
os.unlink(filename + '.gpkg')
def testDateTimeWriteShapefile(self):
"""Check writing date and time fields to an ESRI shapefile."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&'
'field=date_f:date&field=time_f:time&field=dt_f:datetime'),
'test',
'memory')
self.assertTrue(ml.isValid())
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes([1, QDate(2014, 3, 5), QTime(13, 45, 22), QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22))])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'datetime.shp')
crs = QgsCoordinateReferenceSystem('EPSG:4326')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('date_f')).type(), QVariant.Date)
# shapefiles do not support time types, result should be string
self.assertEqual(fields.at(fields.indexFromName('time_f')).type(), QVariant.String)
# shapefiles do not support datetime types, result should be string
self.assertEqual(fields.at(fields.indexFromName('dt_f')).type(), QVariant.String)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
date_idx = created_layer.fields().lookupField('date_f')
self.assertIsInstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2014, 3, 5))
time_idx = created_layer.fields().lookupField('time_f')
# shapefiles do not support time types
self.assertIsInstance(f.attributes()[time_idx], str)
self.assertEqual(f.attributes()[time_idx], '13:45:22')
# shapefiles do not support datetime types
datetime_idx = created_layer.fields().lookupField('dt_f')
self.assertIsInstance(f.attributes()[datetime_idx], str)
self.assertEqual(f.attributes()[datetime_idx],
QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22)).toString("yyyy/MM/dd hh:mm:ss.zzz"))
def testWriterWithExtent(self):
"""Check writing using extent filter."""
source_file = os.path.join(TEST_DATA_DIR, 'points.shp')
source_layer = QgsVectorLayer(source_file, 'Points', 'ogr')
self.assertTrue(source_layer.isValid())
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'ESRI Shapefile'
options.filterExtent = QgsRectangle(-111, 26, -96, 38)
dest_file_name = os.path.join(str(QDir.tempPath()), 'extent_no_transform.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
source_layer,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
features = [f for f in created_layer.getFeatures()]
self.assertEqual(len(features), 5)
for f in features:
self.assertTrue(f.geometry().intersects(options.filterExtent))
def testWriterWithExtentAndReprojection(self):
"""Check writing using extent filter with reprojection."""
source_file = os.path.join(TEST_DATA_DIR, 'points.shp')
source_layer = QgsVectorLayer(source_file, 'Points', 'ogr')
self.assertTrue(source_layer.isValid())
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'ESRI Shapefile'
options.filterExtent = QgsRectangle(-12511460, 3045157, -10646621, 4683497)
options.ct = QgsCoordinateTransform(source_layer.crs(), QgsCoordinateReferenceSystem.fromEpsgId(3785), QgsProject.instance())
dest_file_name = os.path.join(str(QDir.tempPath()), 'extent_transform.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
source_layer,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
features = [f for f in created_layer.getFeatures()]
self.assertEqual(len(features), 5)
for f in features:
self.assertTrue(f.geometry().intersects(options.filterExtent))
def testDateTimeWriteTabfile(self):
"""Check writing date and time fields to an MapInfo tabfile."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&'
'field=date_f:date&field=time_f:time&field=dt_f:datetime'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
ft.setAttributes([1, QDate(2014, 3, 5), QTime(13, 45, 22), QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22))])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'datetime.tab')
crs = QgsCoordinateReferenceSystem('EPSG:4326')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'MapInfo File')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('date_f')).type(), QVariant.Date)
self.assertEqual(fields.at(fields.indexFromName('time_f')).type(), QVariant.Time)
self.assertEqual(fields.at(fields.indexFromName('dt_f')).type(), QVariant.DateTime)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
date_idx = created_layer.fields().lookupField('date_f')
self.assertIsInstance(f.attributes()[date_idx], QDate)
self.assertEqual(f.attributes()[date_idx], QDate(2014, 3, 5))
time_idx = created_layer.fields().lookupField('time_f')
self.assertIsInstance(f.attributes()[time_idx], QTime)
self.assertEqual(f.attributes()[time_idx], QTime(13, 45, 22))
datetime_idx = created_layer.fields().lookupField('dt_f')
self.assertIsInstance(f.attributes()[datetime_idx], QDateTime)
self.assertEqual(f.attributes()[datetime_idx], QDateTime(QDate(2014, 3, 5), QTime(13, 45, 22)))
def testWriteShapefileWithZ(self):
"""Check writing geometries with Z dimension to an ESRI shapefile."""
# start by saving a memory layer and forcing z
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('PointZ (1 2 3)'))
ft.setAttributes([1])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
# check with both a standard PointZ and 25d style Point25D type
for t in [QgsWkbTypes.PointZ, QgsWkbTypes.Point25D]:
dest_file_name = os.path.join(str(QDir.tempPath()), 'point_{}.shp'.format(QgsWkbTypes.displayString(t)))
crs = QgsCoordinateReferenceSystem('EPSG:4326')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
overrideGeometryType=t)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
expWkt = 'PointZ (1 2 3)'
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with Z failed: mismatch Expected:\n%s\nGot:\n%s\n" % (expWkt, wkt))
# also try saving out the shapefile version again, as an extra test
# this tests that saving a layer with z WITHOUT explicitly telling the writer to keep z values,
# will still retain the z values
dest_file_name = os.path.join(str(QDir.tempPath()),
'point_{}_copy.shp'.format(QgsWkbTypes.displayString(t)))
crs = QgsCoordinateReferenceSystem('EPSG:4326')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
created_layer,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer_from_shp = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer_from_shp.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with Z failed: mismatch Expected:\n%s\nGot:\n%s\n" % (expWkt, wkt))
def testWriteShapefileWithMultiConversion(self):
"""Check writing geometries to an ESRI shapefile with conversion to multi."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('Point (1 2)'))
ft.setAttributes([1])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'to_multi.shp')
crs = QgsCoordinateReferenceSystem('EPSG:4326')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
forceMulti=True)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
expWkt = 'MultiPoint ((1 2))'
self.assertTrue(compareWkt(expWkt, wkt),
"saving geometry with multi conversion failed: mismatch Expected:\n%s\nGot:\n%s\n" % (
expWkt, wkt))
def testWriteShapefileWithAttributeSubsets(self):
"""Tests writing subsets of attributes to files."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=id:int&field=field1:int&field=field2:int&field=field3:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('Point (1 2)'))
ft.setAttributes([1, 11, 12, 13])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
# first write out with all attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'all_attributes.shp')
crs = QgsCoordinateReferenceSystem('EPSG:4326')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
attributes=[])
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 4)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['id'], 1)
self.assertEqual(f['field1'], 11)
self.assertEqual(f['field2'], 12)
self.assertEqual(f['field3'], 13)
# now test writing out only a subset of attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'subset_attributes.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
attributes=[1, 3])
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 2)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['field1'], 11)
self.assertEqual(f['field3'], 13)
# finally test writing no attributes
dest_file_name = os.path.join(str(QDir.tempPath()), 'no_attributes.shp')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'ESRI Shapefile',
skipAttributeCreation=True)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
# expect only a default 'FID' field for shapefiles
self.assertEqual(created_layer.fields().count(), 1)
self.assertEqual(created_layer.fields()[0].name(), 'FID')
# in this case we also check that the geometry exists, to make sure feature has been correctly written
# even without attributes
f = next(created_layer.getFeatures(QgsFeatureRequest()))
g = f.geometry()
wkt = g.asWkt()
expWkt = 'Point (1 2)'
self.assertTrue(compareWkt(expWkt, wkt),
"geometry not saved correctly when saving without attributes : mismatch Expected:\n%s\nGot:\n%s\n" % (
expWkt, wkt))
self.assertEqual(f['FID'], 0)
def testValueConverter(self):
"""Tests writing a layer with a field value converter."""
ml = QgsVectorLayer(
('Point?field=nonconv:int&field=ignored:string&field=converted:int'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
self.assertEqual(ml.fields().count(), 3)
ft = QgsFeature()
ft.setAttributes([1, 'ignored', 3])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'value_converter.shp')
converter = TestFieldValueConverter(ml)
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
QgsCoordinateReferenceSystem(),
'ESRI Shapefile',
attributes=[0, 2],
fieldValueConverter=converter)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
self.assertEqual(created_layer.fields().count(), 2)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f['nonconv'], 1)
self.assertEqual(f['conv_attr'], 'converted_val')
def testInteger64WriteTabfile(self):
"""Check writing Integer64 fields to an MapInfo tabfile (which does not support that type)."""
ml = QgsVectorLayer(
('Point?crs=epsg:4326&field=int8:int8'),
'test',
'memory')
self.assertIsNotNone(ml, 'Provider not initialized')
self.assertTrue(ml.isValid(), 'Source layer not valid')
provider = ml.dataProvider()
self.assertIsNotNone(provider)
ft = QgsFeature()
ft.setAttributes([2123456789])
res, features = provider.addFeatures([ft])
self.assertTrue(res)
self.assertTrue(features)
dest_file_name = os.path.join(str(QDir.tempPath()), 'integer64.tab')
crs = QgsCoordinateReferenceSystem('EPSG:4326')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
dest_file_name,
'utf-8',
crs,
'MapInfo File')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer('{}|layerid=0'.format(dest_file_name), 'test', 'ogr')
fields = created_layer.dataProvider().fields()
self.assertEqual(fields.at(fields.indexFromName('int8')).type(), QVariant.Double)
f = next(created_layer.getFeatures(QgsFeatureRequest()))
int8_idx = created_layer.fields().lookupField('int8')
self.assertEqual(f.attributes()[int8_idx], 2123456789)
def testDefaultDatasetOptions(self):
""" Test retrieving default dataset options for a format """
# NOTE - feel free to adapt these if the defaults change!
options = QgsVectorFileWriter.defaultDatasetOptions('not a format')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultDatasetOptions('ESRI Shapefile')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultDatasetOptions('GML')
# just test a few
self.assertTrue('GML3_LONGSRS=YES' in options)
self.assertTrue('STRIP_PREFIX=NO' in options)
def testDefaultLayerOptions(self):
""" Test retrieving default layer options for a format """
# NOTE - feel free to adapt these if the defaults change!
options = QgsVectorFileWriter.defaultLayerOptions('not a format')
self.assertEqual(options, [])
options = QgsVectorFileWriter.defaultLayerOptions('ESRI Shapefile')
self.assertEqual(options, ['RESIZE=NO'])
options = QgsVectorFileWriter.defaultLayerOptions('GML')
self.assertEqual(options, [])
def testOverwriteLayer(self):
"""Tests writing a layer with a field value converter."""
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([1])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename, update=1)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 1)
ds.CreateLayer('another_layer')
del f
del lyr
del ds
caps = QgsVectorFileWriter.editionCapabilities(filename)
self.assertTrue((caps & QgsVectorFileWriter.CanAddNewLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanAppendToExistingLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanAddNewFieldsToExistingLayer))
self.assertTrue((caps & QgsVectorFileWriter.CanDeleteLayer))
self.assertTrue(QgsVectorFileWriter.targetLayerExists(filename, 'test'))
self.assertFalse(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0]))
# Test CreateOrOverwriteLayer
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([2])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.CreateOrOverwriteLayer
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 2)
# another_layer should still exist
self.assertIsNotNone(ds.GetLayerByName('another_layer'))
del f
del lyr
del ds
# Test CreateOrOverwriteFile
ml = QgsVectorLayer('Point?field=firstfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([3])
provider.addFeatures([ft])
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
# another_layer should no longer exist
self.assertIsNone(ds.GetLayerByName('another_layer'))
del f
del lyr
del ds
# Test AppendToLayerNoNewFields
ml = QgsVectorLayer('Point?field=firstfield:int&field=secondfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([4, -10])
provider.addFeatures([ft])
self.assertTrue(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0, 1]))
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.AppendToLayerNoNewFields
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertEqual(lyr.GetLayerDefn().GetFieldCount(), 1)
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 4)
del f
del lyr
del ds
# Test AppendToLayerAddFields
ml = QgsVectorLayer('Point?field=firstfield:int&field=secondfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([5, -1])
provider.addFeatures([ft])
self.assertTrue(QgsVectorFileWriter.areThereNewFieldsToCreate(filename, 'test', ml, [0, 1]))
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.AppendToLayerAddFields
filename = '/vsimem/out.gpkg'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
ds = ogr.Open(filename)
lyr = ds.GetLayerByName('test')
self.assertEqual(lyr.GetLayerDefn().GetFieldCount(), 2)
self.assertIsNotNone(lyr)
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 3)
if hasattr(f, "IsFieldSetAndNotNull"):
# GDAL >= 2.2
self.assertFalse(f.IsFieldSetAndNotNull('secondfield'))
else:
self.assertFalse(f.IsFieldSet('secondfield'))
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 4)
if hasattr(f, "IsFieldSetAndNotNull"):
self.assertFalse(f.IsFieldSetAndNotNull('secondfield'))
else:
self.assertFalse(f.IsFieldSet('secondfield'))
f = lyr.GetNextFeature()
self.assertEqual(f['firstfield'], 5)
self.assertEqual(f['secondfield'], -1)
del f
del lyr
del ds
gdal.Unlink(filename)
def testSupportedFiltersAndFormat(self):
# test with formats in recommended order
formats = QgsVectorFileWriter.supportedFiltersAndFormats(QgsVectorFileWriter.SortRecommended)
self.assertEqual(formats[0].filterString, 'GeoPackage (*.gpkg *.GPKG)')
self.assertEqual(formats[0].driverName, 'GPKG')
self.assertEqual(formats[0].globs, ['*.gpkg'])
self.assertEqual(formats[1].filterString, 'ESRI Shapefile (*.shp *.SHP)')
self.assertEqual(formats[1].driverName, 'ESRI Shapefile')
self.assertEqual(formats[1].globs, ['*.shp'])
self.assertTrue('ODS' in [f.driverName for f in formats])
self.assertTrue('PGDUMP' in [f.driverName for f in formats])
interlis_format = [f for f in formats if f.driverName == 'Interlis 2'][0]
self.assertEqual(interlis_format.globs, ['*.xtf', '*.xml', '*.ili'])
# alphabetical sorting
formats2 = QgsVectorFileWriter.supportedFiltersAndFormats(QgsVectorFileWriter.VectorFormatOptions())
self.assertTrue(formats2[0].driverName < formats2[1].driverName)
self.assertCountEqual([f.driverName for f in formats], [f.driverName for f in formats2])
self.assertNotEqual(formats2[0].driverName, 'GeoPackage')
# skip non-spatial
formats = QgsVectorFileWriter.supportedFiltersAndFormats(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ODS' in [f.driverName for f in formats])
def testOgrDriverList(self):
# test with drivers in recommended order
drivers = QgsVectorFileWriter.ogrDriverList(QgsVectorFileWriter.SortRecommended)
self.assertEqual(drivers[0].longName, 'GeoPackage')
self.assertEqual(drivers[0].driverName, 'GPKG')
self.assertEqual(drivers[1].longName, 'ESRI Shapefile')
self.assertEqual(drivers[1].driverName, 'ESRI Shapefile')
self.assertTrue('ODS' in [f.driverName for f in drivers])
# ensure that XLSX comes before SQLite, because we should sort on longName, not driverName!
ms_xlsx_index = next(i for i, v in enumerate(drivers) if v.driverName == 'XLSX')
sqlite_index = next(i for i, v in enumerate(drivers) if v.driverName == 'SQLite')
self.assertLess(ms_xlsx_index, sqlite_index)
self.assertIn('[XLSX]', drivers[ms_xlsx_index].longName)
# alphabetical sorting
drivers2 = QgsVectorFileWriter.ogrDriverList(QgsVectorFileWriter.VectorFormatOptions())
self.assertTrue(drivers2[0].longName < drivers2[1].longName)
self.assertCountEqual([d.driverName for d in drivers], [d.driverName for d in drivers2])
self.assertNotEqual(drivers2[0].driverName, 'GPKG')
# skip non-spatial
formats = QgsVectorFileWriter.ogrDriverList(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ODS' in [f.driverName for f in formats])
def testSupportedFormatExtensions(self):
formats = QgsVectorFileWriter.supportedFormatExtensions()
self.assertTrue('gpkg' in formats)
self.assertFalse('exe' in formats)
self.assertEqual(formats[0], 'gpkg')
self.assertEqual(formats[1], 'shp')
self.assertTrue('ods' in formats)
self.assertTrue('xtf' in formats)
self.assertTrue('ili' in formats)
for i in range(2, len(formats) - 1):
self.assertLess(formats[i].lower(), formats[i + 1].lower())
# alphabetical sorting
formats2 = QgsVectorFileWriter.supportedFormatExtensions(QgsVectorFileWriter.VectorFormatOptions())
self.assertTrue(formats2[0] < formats2[1])
self.assertCountEqual(formats, formats2)
self.assertNotEqual(formats2[0], 'gpkg')
for i in range(0, len(formats2) - 1):
self.assertLess(formats2[i].lower(), formats2[i + 1].lower())
formats = QgsVectorFileWriter.supportedFormatExtensions(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ods' in formats)
def testFileFilterString(self):
formats = QgsVectorFileWriter.fileFilterString()
self.assertTrue('gpkg' in formats)
self.assertTrue('shp' in formats)
self.assertLess(formats.index('gpkg'), formats.index('shp'))
self.assertTrue('ods' in formats)
parts = formats.split(';;')
for i in range(2, len(parts) - 1):
self.assertLess(parts[i].lower(), parts[i + 1].lower())
# alphabetical sorting
formats2 = QgsVectorFileWriter.fileFilterString(QgsVectorFileWriter.VectorFormatOptions())
self.assertNotEqual(formats.index('gpkg'), formats2.index('gpkg'))
parts = formats2.split(';;')
for i in range(len(parts) - 1):
self.assertLess(parts[i].lower(), parts[i + 1].lower())
# hide non spatial
formats = QgsVectorFileWriter.fileFilterString(QgsVectorFileWriter.SkipNonSpatialFormats)
self.assertFalse('ods' in formats)
def testDriverForExtension(self):
self.assertEqual(QgsVectorFileWriter.driverForExtension('shp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('SHP'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('sHp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('.shp'), 'ESRI Shapefile')
self.assertEqual(QgsVectorFileWriter.driverForExtension('tab'), 'MapInfo File')
self.assertEqual(QgsVectorFileWriter.driverForExtension('.GML'), 'GML')
self.assertEqual(QgsVectorFileWriter.driverForExtension('not a format'), '')
self.assertEqual(QgsVectorFileWriter.driverForExtension(''), '')
def testSupportsFeatureStyles(self):
self.assertFalse(QgsVectorFileWriter.supportsFeatureStyles('ESRI Shapefile'))
self.assertFalse(QgsVectorFileWriter.supportsFeatureStyles('not a driver'))
self.assertTrue(QgsVectorFileWriter.supportsFeatureStyles('DXF'))
self.assertTrue(QgsVectorFileWriter.supportsFeatureStyles('KML'))
self.assertTrue(QgsVectorFileWriter.supportsFeatureStyles('MapInfo File'))
self.assertTrue(QgsVectorFileWriter.supportsFeatureStyles('MapInfo MIF'))
def testOverwriteGPKG(self):
"""Test that overwriting the same origin GPKG file works only if the layername is different"""
# Prepare test data
ml = QgsVectorLayer('Point?field=firstfield:int&field=secondfield:int', 'test', 'memory')
provider = ml.dataProvider()
ft = QgsFeature()
ft.setAttributes([4, -10])
provider.addFeatures([ft])
filehandle, filename = tempfile.mkstemp('.gpkg')
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'GPKG'
options.layerName = 'test'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Real test
vl = QgsVectorLayer("%s|layername=test" % filename, 'src_test', 'ogr')
self.assertTrue(vl.isValid())
self.assertEqual(vl.featureCount(), 1)
# This must fail
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
vl,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.ErrCreateDataSource)
self.assertEqual(error_message, 'Cannot overwrite a OGR layer in place')
options.layerName = 'test2'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
vl,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
def testCreateDGN(self):
ml = QgsVectorLayer('Point?crs=epsg:4326', 'test', 'memory')
provider = ml.dataProvider()
feat = QgsFeature()
feat.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
provider.addFeatures([feat])
filename = os.path.join(str(QDir.tempPath()), 'testCreateDGN.dgn')
crs = QgsCoordinateReferenceSystem('EPSG:4326')
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(ml, filename, 'utf-8', crs, 'DGN')
# open the resulting file
vl = QgsVectorLayer(filename, '', 'ogr')
self.assertTrue(vl.isValid())
self.assertEqual(vl.featureCount(), 1)
del vl
# append
options = QgsVectorFileWriter.SaveVectorOptions()
options.driverName = 'DGN'
options.layerName = 'test'
options.actionOnExistingFile = QgsVectorFileWriter.AppendToLayerNoNewFields
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
ml,
filename,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# open the resulting file
vl = QgsVectorLayer(filename, '', 'ogr')
self.assertTrue(vl.isValid())
self.assertEqual(vl.featureCount(), 2)
del vl
os.unlink(filename)
def testAddZ(self):
"""Check adding z values to non z input."""
input = QgsVectorLayer(
'Point?crs=epsg:4326&field=name:string(20)',
'test',
'memory')
self.assertTrue(input.isValid(), 'Provider not initialized')
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(10, 10)))
myResult, myFeatures = input.dataProvider().addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
dest_file_name = os.path.join(str(QDir.tempPath()), 'add_z.geojson')
options = QgsVectorFileWriter.SaveVectorOptions()
options.overrideGeometryType = QgsWkbTypes.PointZ
options.driverName = 'GeoJSON'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
input,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer(dest_file_name, 'test', 'ogr')
self.assertTrue(created_layer.isValid())
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f.geometry().asWkt(), 'PointZ (10 10 0)')
def testDropZ(self):
"""Check dropping z values input."""
input = QgsVectorLayer(
'PointZ?crs=epsg:4326&field=name:string(20)',
'test',
'memory')
self.assertTrue(input.isValid(), 'Provider not initialized')
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('PointM(10 10 2)'))
myResult, myFeatures = input.dataProvider().addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
dest_file_name = os.path.join(str(QDir.tempPath()), 'drop_z.geojson')
options = QgsVectorFileWriter.SaveVectorOptions()
options.overrideGeometryType = QgsWkbTypes.PointM
options.driverName = 'GeoJSON'
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
input,
dest_file_name,
options)
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer(dest_file_name, 'test', 'ogr')
self.assertTrue(created_layer.isValid())
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f.geometry().asWkt(), 'Point (10 10)')
@unittest.skipIf(int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 4, 0), 'GDAL 2.4 required')
def testWriteWithStringListField(self):
"""
Test writing with a string list field
:return:
"""
basetestpath = tempfile.mkdtemp()
tmpfile = os.path.join(basetestpath, 'newstringlistfield.gml')
ds = ogr.GetDriverByName('GML').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('strfield', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('intfield', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('strlistfield', ogr.OFTStringList))
ds = None
vl = QgsVectorLayer(tmpfile)
self.assertTrue(vl.isValid())
# write a gml dataset with a string list field
filename = os.path.join(str(QDir.tempPath()), 'with_stringlist_field.gml')
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(vl,
filename,
'utf-8',
vl.crs(),
'GML')
self.assertEqual(rc, QgsVectorFileWriter.NoError)
# open the resulting gml
vl = QgsVectorLayer(filename, '', 'ogr')
self.assertTrue(vl.isValid())
fields = vl.fields()
# test type of converted field
idx = fields.indexFromName('strlistfield')
self.assertEqual(fields.at(idx).type(), QVariant.List)
self.assertEqual(fields.at(idx).subType(), QVariant.String)
del vl
os.unlink(filename)
def testWriteWithBinaryField(self):
"""
Test writing with a binary field
:return:
"""
basetestpath = tempfile.mkdtemp()
tmpfile = os.path.join(basetestpath, 'binaryfield.sqlite')
ds = ogr.GetDriverByName('SQLite').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint, options=['FID=fid'])
lyr.CreateField(ogr.FieldDefn('strfield', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn('intfield', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('binfield', ogr.OFTBinary))
lyr.CreateField(ogr.FieldDefn('binfield2', ogr.OFTBinary))
f = None
ds = None
vl = QgsVectorLayer(tmpfile)
self.assertTrue(vl.isValid())
# check that 1 of its fields is a bool
fields = vl.fields()
self.assertEqual(fields.at(fields.indexFromName('binfield')).type(), QVariant.ByteArray)
dp = vl.dataProvider()
f = QgsFeature(fields)
bin_1 = b'xxx'
bin_2 = b'yyy'
bin_val1 = QByteArray(bin_1)
bin_val2 = QByteArray(bin_2)
f.setAttributes([1, 'str', 100, bin_val1, bin_val2])
self.assertTrue(dp.addFeature(f))
# write a gpkg package with a binary field
filename = os.path.join(str(QDir.tempPath()), 'with_bin_field')
rc, errmsg = QgsVectorFileWriter.writeAsVectorFormat(vl,
filename,
'utf-8',
vl.crs(),
'GPKG')
self.assertEqual(rc, QgsVectorFileWriter.NoError)
# open the resulting geopackage
vl = QgsVectorLayer(filename + '.gpkg', '', 'ogr')
self.assertTrue(vl.isValid())
fields = vl.fields()
# test type of converted field
idx = fields.indexFromName('binfield')
self.assertEqual(fields.at(idx).type(), QVariant.ByteArray)
idx2 = fields.indexFromName('binfield2')
self.assertEqual(fields.at(idx2).type(), QVariant.ByteArray)
# test values
self.assertEqual(vl.getFeature(1).attributes()[idx], bin_val1)
self.assertEqual(vl.getFeature(1).attributes()[idx2], bin_val2)
del vl
os.unlink(filename + '.gpkg')
def testWriteKMLAxisOrderIssueGDAL3(self):
"""Check axis order issue when writing KML with EPSG:4326."""
if not ogr.GetDriverByName('KML'):
return
vl = QgsVectorLayer(
'PointZ?crs=epsg:4326&field=name:string(20)',
'test',
'memory')
self.assertTrue(vl.isValid(), 'Provider not initialized')
ft = QgsFeature()
ft.setGeometry(QgsGeometry.fromWkt('Point(2 49)'))
myResult, myFeatures = vl.dataProvider().addFeatures([ft])
self.assertTrue(myResult)
self.assertTrue(myFeatures)
dest_file_name = os.path.join(str(QDir.tempPath()), 'testWriteKMLAxisOrderIssueGDAL3.kml')
write_result, error_message = QgsVectorFileWriter.writeAsVectorFormat(
vl,
dest_file_name,
'utf-8',
vl.crs(),
'KML')
self.assertEqual(write_result, QgsVectorFileWriter.NoError, error_message)
# Open result and check
created_layer = QgsVectorLayer(dest_file_name, 'test', 'ogr')
self.assertTrue(created_layer.isValid())
f = next(created_layer.getFeatures(QgsFeatureRequest()))
self.assertEqual(f.geometry().asWkt(), 'PointZ (2 49 0)')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |