import re
from unittest import mock
import grpc
import pytest
from celery.states import EXCEPTION_STATES, PROPAGATE_STATES, SUCCESS
from clearly.client import ClearlyClient, ModeTask, ModeWorker
from clearly.client.client import Modes
from clearly.protos.clearly_pb2 import Null, PatternFilter, RealtimeMessage, SeenTasksMessage, \
StatsMessage, TaskMessage, WorkerMessage
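# Unit tests for the Clearly client. The gRPC channel and server stub are
# mocked out by the fixtures below, so only client-side behavior is exercised.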
@pytest.fixture
def mocked_client():
with mock.patch('grpc.insecure_channel'), \
mock.patch('clearly.client.client.ClearlyServerStub'):
yield ClearlyClient()
@pytest.fixture
def mocked_display(mocked_client):
with mock.patch('clearly.client.ClearlyClient._display_task'), \
mock.patch('clearly.client.ClearlyClient._display_worker'):
yield mocked_client
@pytest.fixture
def task_message():
yield TaskMessage(
name='name', routing_key='routing_key', uuid='uuid', retries=2,
args='args', kwargs='kwargs', result='result', traceback='traceback',
timestamp=123.1, state='ANY', result_meta='meta',
)
@pytest.fixture
def worker_message():
yield WorkerMessage(
hostname='hostname', pid=12000, sw_sys='sw_sys', sw_ident='sw_ident',
sw_ver='sw_ver', loadavg=[1., 2., 3.], processed=5432, state='state',
freq=5, heartbeats=[234.2],
)
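# Fixtures such as mode_task_type, mode_worker_type, task_state_type, bool1/bool2
# and strip_colors are not defined in this module; they presumably come from a
# shared conftest.py.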
# noinspection PyProtectedMember
def test_client_reset(mocked_client):
mocked_client.reset_tasks()
mocked_client._stub.reset_tasks.assert_called_once_with(Null())
# noinspection PyProtectedMember
def test_client_seen_tasks_do_print(mocked_client, capsys):
inner_tasks = ['app{i}.task{i}'.format(i=i) for i in range(3)]
tasks = SeenTasksMessage()
tasks.task_types.extend(inner_tasks)
mocked_client._stub.seen_tasks.return_value = tasks
mocked_client.seen_tasks()
generated = filter(None, capsys.readouterr().out.split('\n'))
assert all(any(re.search(re.escape(t), x) for x in generated) for t in inner_tasks)
# noinspection PyProtectedMember
def test_client_capture_task(task_message, mode_task_type, mocked_display):
mocked_display._stub.capture_realtime.return_value = \
(RealtimeMessage(task=task_message),)
mocked_display.capture(modes=mode_task_type)
mocked_display._display_task.assert_called_once_with(task_message, mode_task_type)
# noinspection PyProtectedMember
def test_client_capture_ignore_unknown(mocked_display):
mocked_display._stub.capture_realtime.return_value = (RealtimeMessage(),)
mocked_display.capture()
mocked_display._display_task.assert_not_called()
mocked_display._display_worker.assert_not_called()
# noinspection PyProtectedMember
def test_client_capture_worker(worker_message, mode_worker_type, mocked_display):
mocked_display._stub.capture_realtime.return_value = \
(RealtimeMessage(worker=worker_message),)
mocked_display.capture(modes=mode_worker_type)
mocked_display._display_worker.assert_called_once_with(worker_message, mode_worker_type)
# noinspection PyProtectedMember
@pytest.mark.parametrize('method, stub', [
('capture_tasks', 'capture_realtime'),
('capture_workers', 'capture_realtime'),
('capture', 'capture_realtime'),
('tasks', 'filter_tasks'),
('workers', 'filter_workers'),
('seen_tasks', 'seen_tasks'),
('reset_tasks', 'reset_tasks'),
('metrics', 'get_metrics'),
])
def test_client_methods_have_user_friendly_grpc_errors(method, stub, mocked_display, capsys):
exc = grpc.RpcError()
exc.code, exc.details = lambda: 'StatusCode', lambda: 'details'
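    # A bare grpc.RpcError has no code()/details(); errors raised by real gRPC
    # calls do, so fake them here for the user-friendly error message.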
getattr(mocked_display._stub, stub).side_effect = exc
with mock.patch.object(mocked_display, '_debug', False):
getattr(mocked_display, method)()
generated = capsys.readouterr().out
assert 'Server communication error' in generated
assert 'StatusCode' in generated
assert 'details' in generated
# noinspection PyProtectedMember
@pytest.mark.parametrize('method, stub', [
('capture_tasks', 'capture_realtime'),
('capture_workers', 'capture_realtime'),
('capture', 'capture_realtime'),
('tasks', 'filter_tasks'),
('workers', 'filter_workers'),
('seen_tasks', 'seen_tasks'),
('reset_tasks', 'reset_tasks'),
('metrics', 'get_metrics'),
])
def test_client_methods_trigger_grpc_errors_when_debugging(method, stub, mocked_client):
rpc_error = grpc.RpcError()
rpc_error.details = rpc_error.code = mock.Mock()
getattr(mocked_client._stub, stub).side_effect = rpc_error
with mock.patch.object(mocked_client, '_debug', True), pytest.raises(grpc.RpcError):
getattr(mocked_client, method)()
# noinspection PyProtectedMember
@pytest.mark.parametrize('method', [
'capture_tasks', 'capture_workers', 'capture', 'tasks', 'workers'
])
def test_client_methods_have_user_friendly_warnings(method, mocked_display, capsys):
with mock.patch('clearly.client.ClearlyClient._parse_pattern') as mock_parse:
mock_parse.return_value = None
getattr(mocked_display, method)()
generated = capsys.readouterr().out
assert 'Nothing would be selected.' in generated
def test_client_capture_tasks(mocked_client):
with mock.patch.object(mocked_client, 'capture') as mocked_capture:
mocked_client.capture_tasks()
mocked_capture.assert_called_once_with(
tasks=mock.ANY, modes=mock.ANY,
workers='!',
)
def test_client_capture_workers(mocked_client):
with mock.patch.object(mocked_client, 'capture') as mocked_capture:
mocked_client.capture_workers()
mocked_capture.assert_called_once_with(
workers=mock.ANY, modes=mock.ANY,
tasks='!',
)
# noinspection PyProtectedMember
def test_client_metrics_do_print(mocked_client, capsys):
data = dict(task_count=1234, event_count=5678, len_tasks=2244, len_workers=333)
mocked_client._stub.get_metrics.return_value = StatsMessage(**data)
mocked_client.metrics()
generated = capsys.readouterr().out
assert all(re.search(str(x), generated) for x in data.values())
# noinspection PyProtectedMember
def test_client_tasks(task_message, mode_task_type, mocked_display):
mocked_display._stub.filter_tasks.return_value = (task_message,)
mocked_display.tasks(mode=mode_task_type)
mocked_display._display_task.assert_called_once_with(task_message, mode_task_type)
# noinspection PyProtectedMember
def test_client_workers(worker_message, mode_worker_type, mocked_display):
mocked_display._stub.filter_workers.return_value = (worker_message,)
mocked_display.workers(mode=mode_worker_type)
mocked_display._display_worker.assert_called_once_with(worker_message, mode_worker_type)
# noinspection PyProtectedMember
def test_client_display_task(task_message, mode_task_type, mocked_client,
task_state_plus_2, capsys, strip_colors):
if task_state_plus_2 == '?1':
task_state_type = task_message.state = ''
elif task_state_plus_2 == '?2':
task_state_type = task_message.state = SUCCESS
task_message.result = str(None)
else:
task_state_type = task_message.state = task_state_plus_2
mocked_client._display_task(task_message, mode_task_type)
generated = strip_colors(capsys.readouterr().out)
assert task_message.name in generated
assert task_message.uuid in generated
if not task_state_type:
assert task_message.routing_key in generated
else:
assert task_state_type in generated
params, success, error = mode_task_type.spec
show_outcome = (task_message.state in PROPAGATE_STATES and error) \
or (task_message.state == SUCCESS and success)
# params
first_seen = bool(params) and not task_state_type
params_outcome = params is not False and show_outcome
show_params = first_seen or params_outcome
assert show_params == (task_message.args in generated)
assert show_params == (task_message.kwargs in generated)
# outcome
if show_outcome:
if task_state_type == SUCCESS:
if task_message.result == str(None):
assert '==> <meta> :)' in generated
else:
assert '==> <meta> \'result\'' in generated
elif task_state_type in EXCEPTION_STATES:
assert '==> traceback' in generated
else:
assert '==>' not in generated
# noinspection PyProtectedMember
def test_client_display_worker(worker_message, mode_worker_type, worker_state_type,
bool1, bool2, mocked_client, capsys, strip_colors):
worker_message.state = worker_state_type
if bool1:
worker_message.heartbeats.pop()
if bool2:
worker_message.timestamp = 123.1
with mock.patch('clearly.client.ClearlyClient._worker_state') as m_worker_state:
mocked_client._display_worker(worker_message, mode_worker_type)
generated = strip_colors(capsys.readouterr().out)
m_worker_state.assert_called_once_with(worker_state_type)
assert worker_message.hostname in generated
assert str(worker_message.pid) in generated
stats, = mode_worker_type.spec
if stats:
assert 'sw_sys' in generated
assert 'sw_ident' in generated
assert 'sw_ver' in generated
assert '[1.0, 2.0, 3.0]' in generated
assert 'heartb:' in generated
# noinspection PyProtectedMember
def test_client_task_state(task_state_type, mocked_client):
result = mocked_client._task_state(task_state_type)
assert task_state_type in result
# noinspection PyProtectedMember
def test_client_worker_state(worker_state_type, mocked_client):
result = mocked_client._worker_state(worker_state_type)
assert worker_state_type in result
# noinspection PyProtectedMember
@pytest.mark.parametrize('value', [
True, 1, object()
])
def test_parse_pattern_error(value, mocked_client):
with pytest.raises(UserWarning):
mocked_client._parse_pattern(value)
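# Pattern grammar exercised below: a leading '!' negates the filter, None or
# blank input defaults to '.' (match everything), and a negated match-all is
# rejected by returning None.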
# noinspection PyProtectedMember
@pytest.mark.parametrize('value, expected', [
(None, PatternFilter(pattern='.', negate=False)),
(' ', PatternFilter(pattern='.', negate=False)),
('pattern', PatternFilter(pattern='pattern', negate=False)),
(' pattern ', PatternFilter(pattern='pattern', negate=False)),
('!pattern', PatternFilter(pattern='pattern', negate=True)),
('! pattern', PatternFilter(pattern='pattern', negate=True)),
('!', None),
('!.', None),
('! . ', None),
])
def test_parse_pattern(value, expected, mocked_client):
assert mocked_client._parse_pattern(value) == expected
def test_display_modes_task_indicator(mode_task_type, mocked_client, capsys):
mocked_client._modes = Modes(mode_task_type, ModeWorker.WORKER)
mocked_client.display_modes()
assert '* ' + mode_task_type.name in capsys.readouterr().out
def test_display_modes_worker_indicator(mode_worker_type, mocked_client, capsys):
mocked_client._modes = Modes(ModeTask.TASK, mode_worker_type)
mocked_client.display_modes()
assert '* ' + mode_worker_type.name in capsys.readouterr().out
def test_display_modes_set_params(mocked_client):
with mock.patch.object(mocked_client, '_get_display_modes') as mock_gdm:
mocked_client.display_modes(1)
mock_gdm.assert_called_once_with((1,))
# noinspection PyProtectedMember
assert mocked_client._modes == mock_gdm()
@pytest.mark.parametrize('modes', [
(ModeTask.SENT, ModeTask.TASK),
(ModeWorker.STATS, ModeWorker.WORKER),
(ModeTask.SENT, ModeWorker.STATS, ModeWorker.STATS),
])
def test_get_display_modes_error(modes, mocked_client):
with mock.patch('clearly.client.client.find_mode') as mock_find_mode, \
pytest.raises(UserWarning):
mock_find_mode.side_effect = lambda x: x
# noinspection PyProtectedMember
mocked_client._get_display_modes(modes)
@pytest.mark.parametrize('modes, expected', [
(None, (ModeTask.TASK, ModeWorker.WORKER)),
((None,), (ModeTask.TASK, ModeWorker.WORKER)),
((None, None), (ModeTask.TASK, ModeWorker.WORKER)),
(ModeTask.SENT, (ModeTask.SENT, ModeWorker.WORKER)),
((ModeTask.SENT,), (ModeTask.SENT, ModeWorker.WORKER)),
((ModeTask.SENT, None), (ModeTask.SENT, ModeWorker.WORKER)),
((None, ModeTask.SENT), (ModeTask.SENT, ModeWorker.WORKER)),
(ModeWorker.STATS, (ModeTask.TASK, ModeWorker.STATS)),
((ModeWorker.STATS,), (ModeTask.TASK, ModeWorker.STATS)),
((ModeWorker.STATS, None), (ModeTask.TASK, ModeWorker.STATS)),
((None, ModeWorker.STATS), (ModeTask.TASK, ModeWorker.STATS)),
((ModeTask.SENT, ModeWorker.STATS), (ModeTask.SENT, ModeWorker.STATS)),
((ModeWorker.STATS, ModeTask.SENT), (ModeTask.SENT, ModeWorker.STATS)),
])
def test_get_display_modes_ok(modes, expected, mocked_client):
mocked_client._modes = Modes(ModeTask.TASK, ModeWorker.WORKER) # known defaults.
with mock.patch('clearly.client.client.find_mode') as mock_find_mode:
mock_find_mode.side_effect = lambda x: x
# noinspection PyProtectedMember
assert mocked_client._get_display_modes(modes) == expected
# --- source: tests/unit/client/test_client.py (repo: rsalmei/clearly, license: MIT) ---
import platform
import unittest
import warnings
from test.test_support import (fcmp, have_unicode, TESTFN, unlink,
run_unittest, _check_py3k_warnings, check_warnings)
from operator import neg
import sys, cStringIO, random, UserDict
# count the number of test runs.
# used to skip running test_execfile() multiple times
# and to create unique strings to intern in test_intern()
numruns = 0
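# The helper classes below implement just enough of the sequence, iterator and
# truth-value protocols to exercise the builtins under test.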
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
class StrSquares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max:
raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(str(n*n))
n += 1
return self.sofar[i]
class BitBucket:
def write(self, line):
pass
class TestFailingBool:
def __nonzero__(self):
raise RuntimeError
class TestFailingIter:
def __iter__(self):
raise RuntimeError
class BuiltinTest(unittest.TestCase):
def test_import(self):
__import__('sys')
__import__('time')
__import__('string')
__import__(name='sys')
__import__(name='time', level=0)
self.assertRaises(ImportError, __import__, 'spamspam')
self.assertRaises(TypeError, __import__, 1, 2, 3, 4)
self.assertRaises(ValueError, __import__, '')
self.assertRaises(TypeError, __import__, 'sys', name='sys')
def test_abs(self):
# int
self.assertEqual(abs(0), 0)
self.assertEqual(abs(1234), 1234)
self.assertEqual(abs(-1234), 1234)
self.assertTrue(abs(-sys.maxint-1) > 0)
# float
self.assertEqual(abs(0.0), 0.0)
self.assertEqual(abs(3.14), 3.14)
self.assertEqual(abs(-3.14), 3.14)
# long
self.assertEqual(abs(0L), 0L)
self.assertEqual(abs(1234L), 1234L)
self.assertEqual(abs(-1234L), 1234L)
# str
self.assertRaises(TypeError, abs, 'a')
def test_all(self):
self.assertEqual(all([2, 4, 6]), True)
self.assertEqual(all([2, None, 6]), False)
self.assertRaises(RuntimeError, all, [2, TestFailingBool(), 6])
self.assertRaises(RuntimeError, all, TestFailingIter())
self.assertRaises(TypeError, all, 10) # Non-iterable
self.assertRaises(TypeError, all) # No args
self.assertRaises(TypeError, all, [2, 4, 6], []) # Too many args
self.assertEqual(all([]), True) # Empty iterator
S = [50, 60]
self.assertEqual(all(x > 42 for x in S), True)
S = [50, 40, 60]
self.assertEqual(all(x > 42 for x in S), False)
def test_any(self):
self.assertEqual(any([None, None, None]), False)
self.assertEqual(any([None, 4, None]), True)
self.assertRaises(RuntimeError, any, [None, TestFailingBool(), 6])
        self.assertRaises(RuntimeError, any, TestFailingIter())
self.assertRaises(TypeError, any, 10) # Non-iterable
self.assertRaises(TypeError, any) # No args
self.assertRaises(TypeError, any, [2, 4, 6], []) # Too many args
self.assertEqual(any([]), False) # Empty iterator
S = [40, 60, 30]
self.assertEqual(any(x > 42 for x in S), True)
S = [10, 20, 30]
self.assertEqual(any(x > 42 for x in S), False)
def test_neg(self):
x = -sys.maxint-1
self.assert_(isinstance(x, int))
self.assertEqual(-x, sys.maxint+1)
def test_apply(self):
def f0(*args):
self.assertEqual(args, ())
def f1(a1):
self.assertEqual(a1, 1)
def f2(a1, a2):
self.assertEqual(a1, 1)
self.assertEqual(a2, 2)
def f3(a1, a2, a3):
self.assertEqual(a1, 1)
self.assertEqual(a2, 2)
self.assertEqual(a3, 3)
apply(f0, ())
apply(f1, (1,))
apply(f2, (1, 2))
apply(f3, (1, 2, 3))
# A PyCFunction that takes only positional parameters should allow an
# empty keyword dictionary to pass without a complaint, but raise a
# TypeError if the dictionary is non-empty.
apply(id, (1,), {})
self.assertRaises(TypeError, apply, id, (1,), {"foo": 1})
self.assertRaises(TypeError, apply)
self.assertRaises(TypeError, apply, id, 42)
self.assertRaises(TypeError, apply, id, (42,), 42)
def test_callable(self):
self.assert_(callable(len))
def f(): pass
self.assert_(callable(f))
class C:
def meth(self): pass
self.assert_(callable(C))
x = C()
self.assert_(callable(x.meth))
self.assert_(not callable(x))
class D(C):
def __call__(self): pass
y = D()
self.assert_(callable(y))
y()
def test_chr(self):
self.assertEqual(chr(32), ' ')
self.assertEqual(chr(65), 'A')
self.assertEqual(chr(97), 'a')
self.assertEqual(chr(0xff), '\xff')
self.assertRaises(ValueError, chr, 256)
self.assertRaises(TypeError, chr)
def test_cmp(self):
self.assertEqual(cmp(-1, 1), -1)
self.assertEqual(cmp(1, -1), 1)
self.assertEqual(cmp(1, 1), 0)
# verify that circular objects are not handled
a = []; a.append(a)
b = []; b.append(b)
from UserList import UserList
c = UserList(); c.append(c)
self.assertRaises(RuntimeError, cmp, a, b)
self.assertRaises(RuntimeError, cmp, b, c)
self.assertRaises(RuntimeError, cmp, c, a)
self.assertRaises(RuntimeError, cmp, a, c)
# okay, now break the cycles
a.pop(); b.pop(); c.pop()
self.assertRaises(TypeError, cmp)
def test_coerce(self):
self.assert_(not fcmp(coerce(1, 1.1), (1.0, 1.1)))
self.assertEqual(coerce(1, 1L), (1L, 1L))
self.assert_(not fcmp(coerce(1L, 1.1), (1.0, 1.1)))
self.assertRaises(TypeError, coerce)
class BadNumber:
def __coerce__(self, other):
raise ValueError
self.assertRaises(ValueError, coerce, 42, BadNumber())
self.assertRaises(OverflowError, coerce, 0.5, int("12345" * 1000))
def test_compile(self):
compile('print 1\n', '', 'exec')
bom = '\xef\xbb\xbf'
compile(bom + 'print 1\n', '', 'exec')
compile(source='pass', filename='?', mode='exec')
compile(dont_inherit=0, filename='tmp', source='0', mode='eval')
compile('pass', '?', dont_inherit=1, mode='exec')
self.assertRaises(TypeError, compile)
self.assertRaises(ValueError, compile, 'print 42\n', '<string>', 'badmode')
self.assertRaises(ValueError, compile, 'print 42\n', '<string>', 'single', 0xff)
self.assertRaises(TypeError, compile, chr(0), 'f', 'exec')
self.assertRaises(TypeError, compile, 'pass', '?', 'exec',
mode='eval', source='0', filename='tmp')
if have_unicode:
compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec')
self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec')
self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad')
def test_delattr(self):
import sys
sys.spam = 1
delattr(sys, 'spam')
self.assertRaises(TypeError, delattr)
def test_dir(self):
# dir(wrong number of arguments)
self.assertRaises(TypeError, dir, 42, 42)
# dir() - local scope
local_var = 1
self.assert_('local_var' in dir())
# dir(module)
import sys
self.assert_('exit' in dir(sys))
# dir(module_with_invalid__dict__)
import types
class Foo(types.ModuleType):
__dict__ = 8
f = Foo("foo")
self.assertRaises(TypeError, dir, f)
# dir(type)
self.assert_("strip" in dir(str))
self.assert_("__mro__" not in dir(str))
# dir(obj)
class Foo(object):
def __init__(self):
self.x = 7
self.y = 8
self.z = 9
f = Foo()
self.assert_("y" in dir(f))
# dir(obj_no__dict__)
class Foo(object):
__slots__ = []
f = Foo()
self.assert_("__repr__" in dir(f))
# dir(obj_no__class__with__dict__)
# (an ugly trick to cause getattr(f, "__class__") to fail)
class Foo(object):
__slots__ = ["__class__", "__dict__"]
def __init__(self):
self.bar = "wow"
f = Foo()
self.assert_("__repr__" not in dir(f))
self.assert_("bar" in dir(f))
# dir(obj_using __dir__)
class Foo(object):
def __dir__(self):
return ["kan", "ga", "roo"]
f = Foo()
self.assert_(dir(f) == ["ga", "kan", "roo"])
# dir(obj__dir__not_list)
class Foo(object):
def __dir__(self):
return 7
f = Foo()
self.assertRaises(TypeError, dir, f)
def test_divmod(self):
self.assertEqual(divmod(12, 7), (1, 5))
self.assertEqual(divmod(-12, 7), (-2, 2))
self.assertEqual(divmod(12, -7), (-2, -2))
self.assertEqual(divmod(-12, -7), (1, -5))
self.assertEqual(divmod(12L, 7L), (1L, 5L))
self.assertEqual(divmod(-12L, 7L), (-2L, 2L))
self.assertEqual(divmod(12L, -7L), (-2L, -2L))
self.assertEqual(divmod(-12L, -7L), (1L, -5L))
self.assertEqual(divmod(12, 7L), (1, 5L))
self.assertEqual(divmod(-12, 7L), (-2, 2L))
self.assertEqual(divmod(12L, -7), (-2L, -2))
self.assertEqual(divmod(-12L, -7), (1L, -5))
self.assertEqual(divmod(-sys.maxint-1, -1),
(sys.maxint+1, 0))
self.assert_(not fcmp(divmod(3.25, 1.0), (3.0, 0.25)))
self.assert_(not fcmp(divmod(-3.25, 1.0), (-4.0, 0.75)))
self.assert_(not fcmp(divmod(3.25, -1.0), (-4.0, -0.75)))
self.assert_(not fcmp(divmod(-3.25, -1.0), (3.0, -0.25)))
self.assertRaises(TypeError, divmod)
def test_eval(self):
self.assertEqual(eval('1+1'), 2)
self.assertEqual(eval(' 1+1\n'), 2)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
self.assertEqual(eval('a', globals) , 1)
self.assertEqual(eval('a', globals, locals), 1)
self.assertEqual(eval('b', globals, locals), 200)
self.assertEqual(eval('c', globals, locals), 300)
if have_unicode:
self.assertEqual(eval(unicode('1+1')), 2)
self.assertEqual(eval(unicode(' 1+1\n')), 2)
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
if have_unicode:
self.assertEqual(eval(unicode('a'), globals), 1)
self.assertEqual(eval(unicode('a'), globals, locals), 1)
self.assertEqual(eval(unicode('b'), globals, locals), 200)
self.assertEqual(eval(unicode('c'), globals, locals), 300)
bom = '\xef\xbb\xbf'
self.assertEqual(eval(bom + 'a', globals, locals), 1)
self.assertEqual(eval(unicode('u"\xc3\xa5"', 'utf8'), globals),
unicode('\xc3\xa5', 'utf8'))
self.assertRaises(TypeError, eval)
self.assertRaises(TypeError, eval, ())
def test_general_eval(self):
# Tests that general mappings can be used for the locals argument
class M:
"Test mapping interface versus possible calls from eval()."
def __getitem__(self, key):
if key == 'a':
return 12
raise KeyError
def keys(self):
return list('xyz')
m = M()
g = globals()
self.assertEqual(eval('a', g, m), 12)
self.assertRaises(NameError, eval, 'b', g, m)
self.assertEqual(eval('dir()', g, m), list('xyz'))
self.assertEqual(eval('globals()', g, m), g)
self.assertEqual(eval('locals()', g, m), m)
self.assertRaises(TypeError, eval, 'a', m)
class A:
"Non-mapping"
pass
m = A()
self.assertRaises(TypeError, eval, 'a', g, m)
# Verify that dict subclasses work as well
class D(dict):
def __getitem__(self, key):
if key == 'a':
return 12
return dict.__getitem__(self, key)
def keys(self):
return list('xyz')
d = D()
self.assertEqual(eval('a', g, d), 12)
self.assertRaises(NameError, eval, 'b', g, d)
self.assertEqual(eval('dir()', g, d), list('xyz'))
self.assertEqual(eval('globals()', g, d), g)
self.assertEqual(eval('locals()', g, d), d)
# Verify locals stores (used by list comps)
eval('[locals() for i in (2,3)]', g, d)
eval('[locals() for i in (2,3)]', g, UserDict.UserDict())
class SpreadSheet:
"Sample application showing nested, calculated lookups."
_cells = {}
def __setitem__(self, key, formula):
self._cells[key] = formula
def __getitem__(self, key):
return eval(self._cells[key], globals(), self)
ss = SpreadSheet()
ss['a1'] = '5'
ss['a2'] = 'a1*6'
ss['a3'] = 'a2*7'
self.assertEqual(ss['a3'], 210)
# Verify that dir() catches a non-list returned by eval
# SF bug #1004669
class C:
def __getitem__(self, item):
raise KeyError(item)
def keys(self):
return 'a'
self.assertRaises(TypeError, eval, 'dir()', globals(), C())
# Done outside of the method test_z to get the correct scope
z = 0
f = open(TESTFN, 'w')
f.write('z = z+1\n')
f.write('z = z*2\n')
f.close()
with _check_py3k_warnings(("execfile.. not supported in 3.x",
DeprecationWarning)):
execfile(TESTFN)
def test_execfile(self):
global numruns
if numruns:
return
numruns += 1
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
self.assertEqual(self.__class__.z, 2)
globals['z'] = 0
execfile(TESTFN, globals)
self.assertEqual(globals['z'], 2)
locals['z'] = 0
execfile(TESTFN, globals, locals)
self.assertEqual(locals['z'], 2)
class M:
"Test mapping interface versus possible calls from execfile()."
def __init__(self):
self.z = 10
def __getitem__(self, key):
if key == 'z':
return self.z
raise KeyError
def __setitem__(self, key, value):
if key == 'z':
self.z = value
return
raise KeyError
locals = M()
locals['z'] = 0
execfile(TESTFN, globals, locals)
self.assertEqual(locals['z'], 2)
unlink(TESTFN)
self.assertRaises(TypeError, execfile)
self.assertRaises(TypeError, execfile, TESTFN, {}, ())
import os
self.assertRaises(IOError, execfile, os.curdir)
self.assertRaises(IOError, execfile, "I_dont_exist")
def test_filter(self):
self.assertEqual(filter(lambda c: 'a' <= c <= 'z', 'Hello World'), 'elloorld')
self.assertEqual(filter(None, [1, 'hello', [], [3], '', None, 9, 0]), [1, 'hello', [3], 9])
self.assertEqual(filter(lambda x: x > 0, [1, -3, 9, 0, 2]), [1, 9, 2])
self.assertEqual(filter(None, Squares(10)), [1, 4, 9, 16, 25, 36, 49, 64, 81])
self.assertEqual(filter(lambda x: x%2, Squares(10)), [1, 9, 25, 49, 81])
def identity(item):
return 1
filter(identity, Squares(5))
self.assertRaises(TypeError, filter)
class BadSeq(object):
def __getitem__(self, index):
if index<4:
return 42
raise ValueError
self.assertRaises(ValueError, filter, lambda x: x, BadSeq())
def badfunc():
pass
self.assertRaises(TypeError, filter, badfunc, range(5))
# test bltinmodule.c::filtertuple()
self.assertEqual(filter(None, (1, 2)), (1, 2))
self.assertEqual(filter(lambda x: x>=3, (1, 2, 3, 4)), (3, 4))
self.assertRaises(TypeError, filter, 42, (1, 2))
# test bltinmodule.c::filterstring()
self.assertEqual(filter(None, "12"), "12")
self.assertEqual(filter(lambda x: x>="3", "1234"), "34")
self.assertRaises(TypeError, filter, 42, "12")
class badstr(str):
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, filter, lambda x: x >="3", badstr("1234"))
class badstr2(str):
def __getitem__(self, index):
return 42
self.assertRaises(TypeError, filter, lambda x: x >=42, badstr2("1234"))
class weirdstr(str):
def __getitem__(self, index):
return weirdstr(2*str.__getitem__(self, index))
self.assertEqual(filter(lambda x: x>="33", weirdstr("1234")), "3344")
class shiftstr(str):
def __getitem__(self, index):
return chr(ord(str.__getitem__(self, index))+1)
self.assertEqual(filter(lambda x: x>="3", shiftstr("1234")), "345")
if have_unicode:
# test bltinmodule.c::filterunicode()
self.assertEqual(filter(None, unicode("12")), unicode("12"))
self.assertEqual(filter(lambda x: x>="3", unicode("1234")), unicode("34"))
self.assertRaises(TypeError, filter, 42, unicode("12"))
self.assertRaises(ValueError, filter, lambda x: x >="3", badstr(unicode("1234")))
class badunicode(unicode):
def __getitem__(self, index):
return 42
self.assertRaises(TypeError, filter, lambda x: x >=42, badunicode("1234"))
class weirdunicode(unicode):
def __getitem__(self, index):
return weirdunicode(2*unicode.__getitem__(self, index))
self.assertEqual(
filter(lambda x: x>=unicode("33"), weirdunicode("1234")), unicode("3344"))
class shiftunicode(unicode):
def __getitem__(self, index):
return unichr(ord(unicode.__getitem__(self, index))+1)
self.assertEqual(
filter(lambda x: x>=unicode("3"), shiftunicode("1234")),
unicode("345")
)
def test_filter_subclasses(self):
# test that filter() never returns tuple, str or unicode subclasses
# and that the result always goes through __getitem__
funcs = (None, bool, lambda x: True)
class tuple2(tuple):
def __getitem__(self, index):
return 2*tuple.__getitem__(self, index)
class str2(str):
def __getitem__(self, index):
return 2*str.__getitem__(self, index)
inputs = {
tuple2: {(): (), (1, 2, 3): (2, 4, 6)},
str2: {"": "", "123": "112233"}
}
if have_unicode:
class unicode2(unicode):
def __getitem__(self, index):
return 2*unicode.__getitem__(self, index)
inputs[unicode2] = {
unicode(): unicode(),
unicode("123"): unicode("112233")
}
for (cls, inps) in inputs.iteritems():
for (inp, exp) in inps.iteritems():
# make sure the output goes through __getitem__
# even if func is None
self.assertEqual(
filter(funcs[0], cls(inp)),
filter(funcs[1], cls(inp))
)
for func in funcs:
outp = filter(func, cls(inp))
self.assertEqual(outp, exp)
self.assert_(not isinstance(outp, cls))
def test_getattr(self):
import sys
self.assert_(getattr(sys, 'stdout') is sys.stdout)
self.assertRaises(TypeError, getattr, sys, 1)
self.assertRaises(TypeError, getattr, sys, 1, "foo")
self.assertRaises(TypeError, getattr)
if have_unicode:
self.assertRaises(UnicodeError, getattr, sys, unichr(sys.maxunicode))
def test_hasattr(self):
import sys
self.assert_(hasattr(sys, 'stdout'))
self.assertRaises(TypeError, hasattr, sys, 1)
self.assertRaises(TypeError, hasattr)
if have_unicode:
self.assertRaises(UnicodeError, hasattr, sys, unichr(sys.maxunicode))
        # Check that hasattr allows SystemExit and KeyboardInterrupt to propagate.
class A:
def __getattr__(self, what):
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, hasattr, A(), "b")
class B:
def __getattr__(self, what):
raise SystemExit
self.assertRaises(SystemExit, hasattr, B(), "b")
def test_hash(self):
hash(None)
self.assertEqual(hash(1), hash(1L))
self.assertEqual(hash(1), hash(1.0))
hash('spam')
if have_unicode:
self.assertEqual(hash('spam'), hash(unicode('spam')))
hash((0,1,2,3))
def f(): pass
self.assertRaises(TypeError, hash, [])
self.assertRaises(TypeError, hash, {})
# Bug 1536021: Allow hash to return long objects
class X:
def __hash__(self):
return 2**100
self.assertEquals(type(hash(X())), int)
class Y(object):
def __hash__(self):
return 2**100
self.assertEquals(type(hash(Y())), int)
class Z(long):
def __hash__(self):
return self
self.assertEquals(hash(Z(42)), hash(42L))
def test_hex(self):
self.assertEqual(hex(16), '0x10')
self.assertEqual(hex(16L), '0x10L')
self.assertEqual(hex(-16), '-0x10')
self.assertEqual(hex(-16L), '-0x10L')
self.assertRaises(TypeError, hex, {})
def test_id(self):
id(None)
id(1)
id(1L)
id(1.0)
id('spam')
id((0,1,2,3))
id([0,1,2,3])
id({'spam': 1, 'eggs': 2, 'ham': 3})
# Test input() later, together with raw_input
def test_intern(self):
self.assertRaises(TypeError, intern)
# This fails if the test is run twice with a constant string,
# therefore append the run counter
s = "never interned before " + str(numruns)
self.assert_(intern(s) is s)
s2 = s.swapcase().swapcase()
self.assert_(intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
        # that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, intern, S("abc"))
# It's still safe to pass these strings to routines that
# call intern internally, e.g. PyObject_SetAttr().
s = S("abc")
setattr(s, s, s)
self.assertEqual(getattr(s, s), s)
def test_iter(self):
self.assertRaises(TypeError, iter)
self.assertRaises(TypeError, iter, 42, 42)
lists = [("1", "2"), ["1", "2"], "12"]
if have_unicode:
lists.append(unicode("12"))
for l in lists:
i = iter(l)
self.assertEqual(i.next(), '1')
self.assertEqual(i.next(), '2')
self.assertRaises(StopIteration, i.next)
def test_isinstance(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assert_(isinstance(c, C))
self.assert_(isinstance(d, C))
self.assert_(not isinstance(e, C))
self.assert_(not isinstance(c, D))
self.assert_(not isinstance('foo', E))
self.assertRaises(TypeError, isinstance, E, 'foo')
self.assertRaises(TypeError, isinstance)
def test_issubclass(self):
class C:
pass
class D(C):
pass
class E:
pass
c = C()
d = D()
e = E()
self.assert_(issubclass(D, C))
self.assert_(issubclass(C, C))
self.assert_(not issubclass(C, D))
self.assertRaises(TypeError, issubclass, 'foo', E)
self.assertRaises(TypeError, issubclass, E, 'foo')
self.assertRaises(TypeError, issubclass)
def test_len(self):
self.assertEqual(len('123'), 3)
self.assertEqual(len(()), 0)
self.assertEqual(len((1, 2, 3, 4)), 4)
self.assertEqual(len([1, 2, 3, 4]), 4)
self.assertEqual(len({}), 0)
self.assertEqual(len({'a':1, 'b': 2}), 2)
class BadSeq:
def __len__(self):
raise ValueError
self.assertRaises(ValueError, len, BadSeq())
def test_map(self):
self.assertEqual(
map(None, 'hello world'),
['h','e','l','l','o',' ','w','o','r','l','d']
)
self.assertEqual(
map(None, 'abcd', 'efg'),
[('a', 'e'), ('b', 'f'), ('c', 'g'), ('d', None)]
)
self.assertEqual(
map(None, range(10)),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
self.assertEqual(
map(lambda x: x*x, range(1,4)),
[1, 4, 9]
)
try:
from math import sqrt
except ImportError:
def sqrt(x):
return pow(x, 0.5)
self.assertEqual(
map(lambda x: map(sqrt,x), [[16, 4], [81, 9]]),
[[4.0, 2.0], [9.0, 3.0]]
)
self.assertEqual(
map(lambda x, y: x+y, [1,3,2], [9,1,4]),
[10, 4, 6]
)
def plus(*v):
accu = 0
for i in v: accu = accu + i
return accu
self.assertEqual(
map(plus, [1, 3, 7]),
[1, 3, 7]
)
self.assertEqual(
map(plus, [1, 3, 7], [4, 9, 2]),
[1+4, 3+9, 7+2]
)
self.assertEqual(
map(plus, [1, 3, 7], [4, 9, 2], [1, 1, 0]),
[1+4+1, 3+9+1, 7+2+0]
)
self.assertEqual(
map(None, Squares(10)),
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
)
self.assertEqual(
map(int, Squares(10)),
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
)
self.assertEqual(
map(None, Squares(3), Squares(2)),
[(0,0), (1,1), (4,None)]
)
self.assertEqual(
map(max, Squares(3), Squares(2)),
[0, 1, 4]
)
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, map, lambda x: x, 42)
self.assertEqual(map(None, [42]), [42])
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, map, lambda x: x, BadSeq())
def badfunc(x):
raise RuntimeError
self.assertRaises(RuntimeError, map, badfunc, range(5))
def test_max(self):
self.assertEqual(max('123123'), '3')
self.assertEqual(max(1, 2, 3), 3)
self.assertEqual(max((1, 2, 3, 1, 2, 3)), 3)
self.assertEqual(max([1, 2, 3, 1, 2, 3]), 3)
self.assertEqual(max(1, 2L, 3.0), 3.0)
self.assertEqual(max(1L, 2.0, 3), 3)
self.assertEqual(max(1.0, 2, 3L), 3L)
for stmt in (
"max(key=int)", # no args
"max(1, key=int)", # single arg not iterable
"max(1, 2, keystone=int)", # wrong keyword
"max(1, 2, key=int, abc=int)", # two many keywords
"max(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt) in globals()
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(max((1,), key=neg), 1) # one elem iterable
self.assertEqual(max((1,2), key=neg), 1) # two elem iterable
self.assertEqual(max(1, 2, key=neg), 1) # two elems
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(max(data, key=f),
sorted(reversed(data), key=f)[-1])
def test_min(self):
self.assertEqual(min('123123'), '1')
self.assertEqual(min(1, 2, 3), 1)
self.assertEqual(min((1, 2, 3, 1, 2, 3)), 1)
self.assertEqual(min([1, 2, 3, 1, 2, 3]), 1)
self.assertEqual(min(1, 2L, 3.0), 1)
self.assertEqual(min(1L, 2.0, 3), 1L)
self.assertEqual(min(1.0, 2, 3L), 1.0)
self.assertRaises(TypeError, min)
self.assertRaises(TypeError, min, 42)
self.assertRaises(ValueError, min, ())
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, min, BadSeq())
class BadNumber:
def __cmp__(self, other):
raise ValueError
self.assertRaises(ValueError, min, (42, BadNumber()))
for stmt in (
"min(key=int)", # no args
"min(1, key=int)", # single arg not iterable
"min(1, 2, keystone=int)", # wrong keyword
"min(1, 2, key=int, abc=int)", # two many keywords
"min(1, 2, key=1)", # keyfunc is not callable
):
try:
exec(stmt) in globals()
except TypeError:
pass
else:
self.fail(stmt)
self.assertEqual(min((1,), key=neg), 1) # one elem iterable
self.assertEqual(min((1,2), key=neg), 2) # two elem iterable
self.assertEqual(min(1, 2, key=neg), 2) # two elems
data = [random.randrange(200) for i in range(100)]
keys = dict((elem, random.randrange(50)) for elem in data)
f = keys.__getitem__
self.assertEqual(min(data, key=f),
sorted(data, key=f)[0])
def test_next(self):
it = iter(range(2))
self.assertEqual(next(it), 0)
self.assertEqual(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertRaises(StopIteration, next, it)
self.assertEquals(next(it, 42), 42)
class Iter(object):
def __iter__(self):
return self
def next(self):
raise StopIteration
it = iter(Iter())
self.assertEquals(next(it, 42), 42)
self.assertRaises(StopIteration, next, it)
def gen():
yield 1
return
it = gen()
self.assertEquals(next(it), 1)
self.assertRaises(StopIteration, next, it)
self.assertEquals(next(it, 42), 42)
def test_oct(self):
self.assertEqual(oct(100), '0144')
self.assertEqual(oct(100L), '0144L')
self.assertEqual(oct(-100), '-0144')
self.assertEqual(oct(-100L), '-0144L')
self.assertRaises(TypeError, oct, ())
def write_testfile(self):
# NB the first 4 lines are also used to test input and raw_input, below
fp = open(TESTFN, 'w')
try:
fp.write('1+1\n')
fp.write('1+1\n')
fp.write('The quick brown fox jumps over the lazy dog')
fp.write('.\n')
fp.write('Dear John\n')
fp.write('XXX'*100)
fp.write('YYY'*100)
finally:
fp.close()
def test_open(self):
self.write_testfile()
fp = open(TESTFN, 'r')
try:
self.assertEqual(fp.readline(4), '1+1\n')
self.assertEqual(fp.readline(4), '1+1\n')
self.assertEqual(fp.readline(), 'The quick brown fox jumps over the lazy dog.\n')
self.assertEqual(fp.readline(4), 'Dear')
self.assertEqual(fp.readline(100), ' John\n')
self.assertEqual(fp.read(300), 'XXX'*100)
self.assertEqual(fp.read(1000), 'YYY'*100)
finally:
fp.close()
unlink(TESTFN)
def test_ord(self):
self.assertEqual(ord(' '), 32)
self.assertEqual(ord('A'), 65)
self.assertEqual(ord('a'), 97)
if have_unicode:
self.assertEqual(ord(unichr(sys.maxunicode)), sys.maxunicode)
self.assertRaises(TypeError, ord, 42)
if have_unicode:
self.assertRaises(TypeError, ord, unicode("12"))
def test_pow(self):
self.assertEqual(pow(0,0), 1)
self.assertEqual(pow(0,1), 0)
self.assertEqual(pow(1,0), 1)
self.assertEqual(pow(1,1), 1)
self.assertEqual(pow(2,0), 1)
self.assertEqual(pow(2,10), 1024)
self.assertEqual(pow(2,20), 1024*1024)
self.assertEqual(pow(2,30), 1024*1024*1024)
self.assertEqual(pow(-2,0), 1)
self.assertEqual(pow(-2,1), -2)
self.assertEqual(pow(-2,2), 4)
self.assertEqual(pow(-2,3), -8)
self.assertEqual(pow(0L,0), 1)
self.assertEqual(pow(0L,1), 0)
self.assertEqual(pow(1L,0), 1)
self.assertEqual(pow(1L,1), 1)
self.assertEqual(pow(2L,0), 1)
self.assertEqual(pow(2L,10), 1024)
self.assertEqual(pow(2L,20), 1024*1024)
self.assertEqual(pow(2L,30), 1024*1024*1024)
self.assertEqual(pow(-2L,0), 1)
self.assertEqual(pow(-2L,1), -2)
self.assertEqual(pow(-2L,2), 4)
self.assertEqual(pow(-2L,3), -8)
self.assertAlmostEqual(pow(0.,0), 1.)
self.assertAlmostEqual(pow(0.,1), 0.)
self.assertAlmostEqual(pow(1.,0), 1.)
self.assertAlmostEqual(pow(1.,1), 1.)
self.assertAlmostEqual(pow(2.,0), 1.)
self.assertAlmostEqual(pow(2.,10), 1024.)
self.assertAlmostEqual(pow(2.,20), 1024.*1024.)
self.assertAlmostEqual(pow(2.,30), 1024.*1024.*1024.)
self.assertAlmostEqual(pow(-2.,0), 1.)
self.assertAlmostEqual(pow(-2.,1), -2.)
self.assertAlmostEqual(pow(-2.,2), 4.)
self.assertAlmostEqual(pow(-2.,3), -8.)
for x in 2, 2L, 2.0:
for y in 10, 10L, 10.0:
for z in 1000, 1000L, 1000.0:
if isinstance(x, float) or \
isinstance(y, float) or \
isinstance(z, float):
self.assertRaises(TypeError, pow, x, y, z)
else:
self.assertAlmostEqual(pow(x, y, z), 24.0)
self.assertRaises(TypeError, pow, -1, -2, 3)
self.assertRaises(ValueError, pow, 1, 2, 0)
self.assertRaises(TypeError, pow, -1L, -2L, 3L)
self.assertRaises(ValueError, pow, 1L, 2L, 0L)
# Will return complex in 3.0:
self.assertRaises(ValueError, pow, -342.43, 0.234)
self.assertRaises(TypeError, pow)
def test_range(self):
self.assertEqual(range(3), [0, 1, 2])
self.assertEqual(range(1, 5), [1, 2, 3, 4])
self.assertEqual(range(0), [])
self.assertEqual(range(-3), [])
self.assertEqual(range(1, 10, 3), [1, 4, 7])
self.assertEqual(range(5, -5, -3), [5, 2, -1, -4])
# Now test range() with longs
self.assertEqual(range(-2**100), [])
self.assertEqual(range(0, -2**100), [])
self.assertEqual(range(0, 2**100, -1), [])
self.assertEqual(range(0, 2**100, -1), [])
a = long(10 * sys.maxint)
b = long(100 * sys.maxint)
c = long(50 * sys.maxint)
self.assertEqual(range(a, a+2), [a, a+1])
self.assertEqual(range(a+2, a, -1L), [a+2, a+1])
self.assertEqual(range(a+4, a, -2), [a+4, a+2])
seq = range(a, b, c)
self.assert_(a in seq)
self.assert_(b not in seq)
self.assertEqual(len(seq), 2)
seq = range(b, a, -c)
self.assert_(b in seq)
self.assert_(a not in seq)
self.assertEqual(len(seq), 2)
seq = range(-a, -b, -c)
self.assert_(-a in seq)
self.assert_(-b not in seq)
self.assertEqual(len(seq), 2)
self.assertRaises(TypeError, range)
self.assertRaises(TypeError, range, 1, 2, 3, 4)
self.assertRaises(ValueError, range, 1, 2, 0)
self.assertRaises(ValueError, range, a, a + 1, long(0))
class badzero(int):
def __cmp__(self, other):
raise RuntimeError
__hash__ = None # Invalid cmp makes this unhashable
self.assertRaises(RuntimeError, range, a, a + 1, badzero(1))
# Reject floats when it would require PyLongs to represent.
# (smaller floats still accepted, but deprecated)
with check_warnings() as w:
warnings.simplefilter("always")
self.assertRaises(TypeError, range, 1e100, 1e101, 1e101)
with check_warnings() as w:
warnings.simplefilter("always")
self.assertEqual(range(1.0), [0])
self.assertRaises(TypeError, range, 0, "spam")
self.assertRaises(TypeError, range, 0, 42, "spam")
self.assertRaises(OverflowError, range, -sys.maxint, sys.maxint)
self.assertRaises(OverflowError, range, 0, 2*sys.maxint)
bignum = 2*sys.maxint
smallnum = 42
# Old-style user-defined class with __int__ method
class I0:
def __init__(self, n):
self.n = int(n)
def __int__(self):
return self.n
self.assertEqual(range(I0(bignum), I0(bignum + 1)), [bignum])
self.assertEqual(range(I0(smallnum), I0(smallnum + 1)), [smallnum])
# New-style user-defined class with __int__ method
class I1(object):
def __init__(self, n):
self.n = int(n)
def __int__(self):
return self.n
self.assertEqual(range(I1(bignum), I1(bignum + 1)), [bignum])
self.assertEqual(range(I1(smallnum), I1(smallnum + 1)), [smallnum])
# New-style user-defined class with failing __int__ method
class IX(object):
def __int__(self):
raise RuntimeError
self.assertRaises(RuntimeError, range, IX())
# New-style user-defined class with invalid __int__ method
class IN(object):
def __int__(self):
return "not a number"
self.assertRaises(TypeError, range, IN())
# Exercise various combinations of bad arguments, to check
# refcounting logic
with check_warnings():
self.assertRaises(TypeError, range, 1e100)
self.assertRaises(TypeError, range, 0, 1e100)
self.assertRaises(TypeError, range, 1e100, 0)
self.assertRaises(TypeError, range, 1e100, 1e100)
self.assertRaises(TypeError, range, 0, 0, 1e100)
self.assertRaises(TypeError, range, 0, 1e100, 1)
self.assertRaises(TypeError, range, 0, 1e100, 1e100)
self.assertRaises(TypeError, range, 1e100, 0, 1)
self.assertRaises(TypeError, range, 1e100, 0, 1e100)
self.assertRaises(TypeError, range, 1e100, 1e100, 1)
self.assertRaises(TypeError, range, 1e100, 1e100, 1e100)
def test_input_and_raw_input(self):
self.write_testfile()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
savestdout = sys.stdout # Eats the echo
try:
sys.stdin = fp
sys.stdout = BitBucket()
self.assertEqual(input(), 2)
self.assertEqual(input('testing\n'), 2)
self.assertEqual(raw_input(), 'The quick brown fox jumps over the lazy dog.')
self.assertEqual(raw_input('testing\n'), 'Dear John')
# SF 1535165: don't segfault on closed stdin
# sys.stdout must be a regular file for triggering
sys.stdout = savestdout
sys.stdin.close()
self.assertRaises(ValueError, input)
sys.stdout = BitBucket()
sys.stdin = cStringIO.StringIO("NULL\0")
self.assertRaises(TypeError, input, 42, 42)
sys.stdin = cStringIO.StringIO(" 'whitespace'")
self.assertEqual(input(), 'whitespace')
sys.stdin = cStringIO.StringIO()
self.assertRaises(EOFError, input)
            # SF 876178: make sure input() respects future options.
sys.stdin = cStringIO.StringIO('1/2')
sys.stdout = cStringIO.StringIO()
exec compile('print input()', 'test_builtin_tmp', 'exec')
sys.stdin.seek(0, 0)
exec compile('from __future__ import division;print input()',
'test_builtin_tmp', 'exec')
sys.stdin.seek(0, 0)
exec compile('print input()', 'test_builtin_tmp', 'exec')
# The result we expect depends on whether new division semantics
# are already in effect.
if 1/2 == 0:
# This test was compiled with old semantics.
expected = ['0', '0.5', '0']
else:
# This test was compiled with new semantics (e.g., -Qnew
                # was given on the command line).
expected = ['0.5', '0.5', '0.5']
self.assertEqual(sys.stdout.getvalue().splitlines(), expected)
del sys.stdout
self.assertRaises(RuntimeError, input, 'prompt')
del sys.stdin
self.assertRaises(RuntimeError, input, 'prompt')
finally:
sys.stdin = savestdin
sys.stdout = savestdout
fp.close()
unlink(TESTFN)
def test_reduce(self):
self.assertEqual(reduce(lambda x, y: x+y, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
reduce(lambda x, y: x+y, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
reduce(lambda x, y: x*y, range(2,21), 1L),
2432902008176640000L
)
self.assertEqual(reduce(lambda x, y: x+y, Squares(10)), 285)
self.assertEqual(reduce(lambda x, y: x+y, Squares(10), 0), 285)
self.assertEqual(reduce(lambda x, y: x+y, Squares(0), 0), 0)
self.assertRaises(TypeError, reduce)
self.assertRaises(TypeError, reduce, 42, 42)
self.assertRaises(TypeError, reduce, 42, 42, 42)
self.assertEqual(reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, reduce, 42, (42, 42))
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, reduce, 42, BadSeq())
def test_reload(self):
import marshal
reload(marshal)
import string
reload(string)
## import sys
## self.assertRaises(ImportError, reload, sys)
def test_repr(self):
self.assertEqual(repr(''), '\'\'')
self.assertEqual(repr(0), '0')
self.assertEqual(repr(0L), '0L')
self.assertEqual(repr(()), '()')
self.assertEqual(repr([]), '[]')
self.assertEqual(repr({}), '{}')
a = []
a.append(a)
self.assertEqual(repr(a), '[[...]]')
a = {}
a[0] = a
self.assertEqual(repr(a), '{0: {...}}')
def test_round(self):
self.assertEqual(round(0.0), 0.0)
self.assertEqual(type(round(0.0)), float) # Will be int in 3.0.
self.assertEqual(round(1.0), 1.0)
self.assertEqual(round(10.0), 10.0)
self.assertEqual(round(1000000000.0), 1000000000.0)
self.assertEqual(round(1e20), 1e20)
self.assertEqual(round(-1.0), -1.0)
self.assertEqual(round(-10.0), -10.0)
self.assertEqual(round(-1000000000.0), -1000000000.0)
self.assertEqual(round(-1e20), -1e20)
self.assertEqual(round(0.1), 0.0)
self.assertEqual(round(1.1), 1.0)
self.assertEqual(round(10.1), 10.0)
self.assertEqual(round(1000000000.1), 1000000000.0)
self.assertEqual(round(-1.1), -1.0)
self.assertEqual(round(-10.1), -10.0)
self.assertEqual(round(-1000000000.1), -1000000000.0)
self.assertEqual(round(0.9), 1.0)
self.assertEqual(round(9.9), 10.0)
self.assertEqual(round(999999999.9), 1000000000.0)
self.assertEqual(round(-0.9), -1.0)
self.assertEqual(round(-9.9), -10.0)
self.assertEqual(round(-999999999.9), -1000000000.0)
self.assertEqual(round(-8.0, -1), -10.0)
self.assertEqual(type(round(-8.0, -1)), float)
self.assertEqual(type(round(-8.0, 0)), float)
self.assertEqual(type(round(-8.0, 1)), float)
# Check half rounding behaviour.
self.assertEqual(round(5.5), 6)
self.assertEqual(round(6.5), 7)
self.assertEqual(round(-5.5), -6)
self.assertEqual(round(-6.5), -7)
# Check behavior on ints
self.assertEqual(round(0), 0)
self.assertEqual(round(8), 8)
self.assertEqual(round(-8), -8)
self.assertEqual(type(round(0)), float) # Will be int in 3.0.
self.assertEqual(type(round(-8, -1)), float)
self.assertEqual(type(round(-8, 0)), float)
self.assertEqual(type(round(-8, 1)), float)
# test new kwargs
self.assertEqual(round(number=-8.0, ndigits=-1), -10.0)
self.assertRaises(TypeError, round)
# test generic rounding delegation for reals
class TestRound(object):
def __float__(self):
return 23.0
class TestNoRound(object):
pass
self.assertEqual(round(TestRound()), 23)
self.assertRaises(TypeError, round, 1, 2, 3)
self.assertRaises(TypeError, round, TestNoRound())
t = TestNoRound()
t.__float__ = lambda *args: args
self.assertRaises(TypeError, round, t)
self.assertRaises(TypeError, round, t, 0)
def test_round_large(self):
# Issue #1869: integral floats should remain unchanged
self.assertEqual(round(5e15-1), 5e15-1)
self.assertEqual(round(5e15), 5e15)
self.assertEqual(round(5e15+1), 5e15+1)
self.assertEqual(round(5e15+2), 5e15+2)
self.assertEqual(round(5e15+3), 5e15+3)
def test_setattr(self):
setattr(sys, 'spam', 1)
self.assertEqual(sys.spam, 1)
self.assertRaises(TypeError, setattr, sys, 1, 'spam')
self.assertRaises(TypeError, setattr)
def test_sum(self):
self.assertEqual(sum([]), 0)
self.assertEqual(sum(range(2,8)), 27)
self.assertEqual(sum(iter(range(2,8))), 27)
self.assertEqual(sum(Squares(10)), 285)
self.assertEqual(sum(iter(Squares(10))), 285)
self.assertEqual(sum([[1], [2], [3]], []), [1, 2, 3])
self.assertRaises(TypeError, sum)
self.assertRaises(TypeError, sum, 42)
self.assertRaises(TypeError, sum, ['a', 'b', 'c'])
self.assertRaises(TypeError, sum, ['a', 'b', 'c'], '')
self.assertRaises(TypeError, sum, [[1], [2], [3]])
self.assertRaises(TypeError, sum, [{2:3}])
self.assertRaises(TypeError, sum, [{2:3}]*2, {2:3})
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, sum, BadSeq())
def test_type(self):
self.assertEqual(type(''), type('123'))
self.assertNotEqual(type(''), type(()))
def test_unichr(self):
if have_unicode:
self.assertEqual(unichr(32), unicode(' '))
self.assertEqual(unichr(65), unicode('A'))
self.assertEqual(unichr(97), unicode('a'))
self.assertEqual(
unichr(sys.maxunicode),
unicode('\\U%08x' % (sys.maxunicode), 'unicode-escape')
)
self.assertRaises(ValueError, unichr, sys.maxunicode+1)
self.assertRaises(TypeError, unichr)
self.assertRaises((OverflowError, ValueError), unichr, 2**32)
# We don't want self in vars(), so these are static methods
@staticmethod
def get_vars_f0():
return vars()
@staticmethod
def get_vars_f2():
BuiltinTest.get_vars_f0()
a = 1
b = 2
return vars()
def test_vars(self):
self.assertEqual(set(vars()), set(dir()))
import sys
self.assertEqual(set(vars(sys)), set(dir(sys)))
self.assertEqual(self.get_vars_f0(), {})
self.assertEqual(self.get_vars_f2(), {'a': 1, 'b': 2})
self.assertRaises(TypeError, vars, 42, 42)
self.assertRaises(TypeError, vars, 42)
def test_zip(self):
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
self.assertEqual(zip(a, b), t)
b = [4, 5, 6]
self.assertEqual(zip(a, b), t)
b = (4, 5, 6, 7)
self.assertEqual(zip(a, b), t)
class I:
def __getitem__(self, i):
if i < 0 or i > 2: raise IndexError
return i + 4
self.assertEqual(zip(a, I()), t)
self.assertEqual(zip(), [])
self.assertEqual(zip(*[]), [])
self.assertRaises(TypeError, zip, None)
class G:
pass
self.assertRaises(TypeError, zip, a, G())
# Make sure zip doesn't try to allocate a billion elements for the
# result list when one of its arguments doesn't say how long it is.
# A MemoryError is the most likely failure mode.
class SequenceWithoutALength:
def __getitem__(self, i):
if i == 5:
raise IndexError
else:
return i
self.assertEqual(
zip(SequenceWithoutALength(), xrange(2**30)),
list(enumerate(range(5)))
)
class BadSeq:
def __getitem__(self, i):
if i == 5:
raise ValueError
else:
return i
self.assertRaises(ValueError, zip, BadSeq(), BadSeq())
def test_format(self):
# Test the basic machinery of the format() builtin. Don't test
# the specifics of the various formatters
self.assertEqual(format(3, ''), '3')
# Returns some classes to use for various tests. There's
# an old-style version, and a new-style version
def classes_new():
class A(object):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromA(A):
pass
class Simple(object): pass
class DerivedFromSimple(Simple):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromSimple2(DerivedFromSimple): pass
return A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2
# In 3.0, classes_classic has the same meaning as classes_new
def classes_classic():
class A:
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromA(A):
pass
class Simple: pass
class DerivedFromSimple(Simple):
def __init__(self, x):
self.x = x
def __format__(self, format_spec):
return str(self.x) + format_spec
class DerivedFromSimple2(DerivedFromSimple): pass
return A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2
def class_test(A, DerivedFromA, DerivedFromSimple, DerivedFromSimple2):
self.assertEqual(format(A(3), 'spec'), '3spec')
self.assertEqual(format(DerivedFromA(4), 'spec'), '4spec')
self.assertEqual(format(DerivedFromSimple(5), 'abc'), '5abc')
self.assertEqual(format(DerivedFromSimple2(10), 'abcdef'),
'10abcdef')
class_test(*classes_new())
class_test(*classes_classic())
def empty_format_spec(value):
# test that:
# format(x, '') == str(x)
# format(x) == str(x)
self.assertEqual(format(value, ""), str(value))
self.assertEqual(format(value), str(value))
# for builtin types, format(x, "") == str(x)
empty_format_spec(17**13)
empty_format_spec(1.0)
empty_format_spec(3.1415e104)
empty_format_spec(-3.1415e104)
empty_format_spec(3.1415e-104)
empty_format_spec(-3.1415e-104)
empty_format_spec(object)
empty_format_spec(None)
# TypeError because self.__format__ returns the wrong type
class BadFormatResult:
def __format__(self, format_spec):
return 1.0
self.assertRaises(TypeError, format, BadFormatResult(), "")
# TypeError because format_spec is not unicode or str
self.assertRaises(TypeError, format, object(), 4)
self.assertRaises(TypeError, format, object(), object())
# tests for object.__format__ really belong elsewhere, but
# there's no good place to put them
x = object().__format__('')
self.assert_(x.startswith('<object object at'))
# first argument to object.__format__ must be string
self.assertRaises(TypeError, object().__format__, 3)
self.assertRaises(TypeError, object().__format__, object())
self.assertRaises(TypeError, object().__format__, None)
# make sure we can take a subclass of str as a format spec
class DerivedFromStr(str): pass
self.assertEqual(format(0, DerivedFromStr('10')), ' 0')
def test_bin(self):
self.assertEqual(bin(0), '0b0')
self.assertEqual(bin(1), '0b1')
self.assertEqual(bin(-1), '-0b1')
self.assertEqual(bin(2**65), '0b1' + '0' * 65)
self.assertEqual(bin(2**65-1), '0b' + '1' * 65)
self.assertEqual(bin(-(2**65)), '-0b1' + '0' * 65)
self.assertEqual(bin(-(2**65-1)), '-0b' + '1' * 65)
def test_bytearray_translate(self):
x = bytearray("abc")
self.assertRaises(ValueError, x.translate, "1", 1)
self.assertRaises(TypeError, x.translate, "1"*256, 1)
class TestSorted(unittest.TestCase):
def test_basic(self):
data = range(100)
copy = data[:]
random.shuffle(copy)
self.assertEqual(data, sorted(copy))
self.assertNotEqual(data, copy)
data.reverse()
random.shuffle(copy)
self.assertEqual(data, sorted(copy, cmp=lambda x, y: cmp(y,x)))
self.assertNotEqual(data, copy)
random.shuffle(copy)
self.assertEqual(data, sorted(copy, key=lambda x: -x))
self.assertNotEqual(data, copy)
random.shuffle(copy)
self.assertEqual(data, sorted(copy, reverse=1))
self.assertNotEqual(data, copy)
def test_inputtypes(self):
s = 'abracadabra'
types = [list, tuple]
if have_unicode:
types.insert(0, unicode)
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
s = ''.join(dict.fromkeys(s).keys()) # unique letters only
types = [set, frozenset, list, tuple, dict.fromkeys]
if have_unicode:
types.insert(0, unicode)
for T in types:
self.assertEqual(sorted(s), sorted(T(s)))
def test_baddecorator(self):
data = 'The quick Brown fox Jumped over The lazy Dog'.split()
self.assertRaises(TypeError, sorted, data, None, lambda x,y: 0)
def _run_unittest(*args):
with _check_py3k_warnings(
(".+ not supported in 3.x", DeprecationWarning),
(".+ is renamed to imp.reload", DeprecationWarning),
("classic int division", DeprecationWarning)):
run_unittest(*args)
def test_main(verbose=None):
test_classes = (BuiltinTest, TestSorted)
_run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
_run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
# --- source: tools/project-creator/Python2.6.6/Lib/test/test_builtin.py (repo: babyliynfg/cross, license: MIT) ---
from __future__ import absolute_import
from .preparer import Preparer # NOQA
|
{
"content_hash": "6ae34a4389df239497203239e310274a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 38,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.759493670886076,
"repo_name": "mvaled/sentry",
"id": "bdd469843998b7d42e0ae26ba57fe7bf88757e5c",
"size": "79",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/mediators/sentry_app_components/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
import os, sys
import os.path as osp
import argparse
import scipy.io as sio
this_dir = osp.dirname(__file__)
sys.path.insert(1, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_dump, path_to_index, proto_load, annot_boxes_at_frame
from vdetlib.utils.common import iou
import numpy as np
def save_if_not_exist(proto, path):
if not os.path.isfile(path):
        # dump the proto that was passed in, not the global box_proto
        proto_dump(proto, path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proposal_file')
parser.add_argument('vid_root')
parser.add_argument('save_root')
parser.add_argument('--annot_root', type=str, default=None)
args = parser.parse_args()
if not os.path.isdir(args.save_root):
os.makedirs(args.save_root)
h5 = False
try:
mat_file = sio.loadmat(args.proposal_file)
image_names = mat_file['images']
all_boxes = mat_file['boxes']
except NotImplementedError:
import h5py
h5 = True
mat_file = h5py.File(args.proposal_file, 'r')
image_names = mat_file.get('images')[0]
all_boxes = mat_file.get('boxes')[0]
cur_vid_name = None
for image_name, boxes in zip(image_names, all_boxes):
if not h5:
image_name = image_name[0][0]
boxes = boxes[0]
else:
image_name = ''.join(map(unichr, mat_file[image_name].value))
boxes = mat_file[boxes].value.T
parts = image_name.split('/')
if len(parts) == 3:
subset, video_name, frame_name = parts
elif len(parts) == 4:
__, subset, video_name, frame_name = parts
elif len(parts) == 2:
video_name, frame_name = parts
else:
raise ValueError('image name has {} components: {}'.format(
len(parts), image_name))
# start a new video
if cur_vid_name != video_name:
if cur_vid_name is not None:
print "Saving {}...".format(cur_vid_name)
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box'))
print "Processsing {}...".format(video_name)
box_proto = {}
box_proto['video'] = video_name
box_proto['boxes'] = []
cur_vid_name = video_name
# read vid_proto
vid_proto = proto_load(
os.path.join(args.vid_root, cur_vid_name+'.vid'))
if args.annot_root:
annot_proto = proto_load(
os.path.join(args.annot_root, cur_vid_name+'.annot'))
# process boxes
frame_idx = path_to_index(vid_proto, frame_name)
if args.annot_root:
annot_boxes = annot_boxes_at_frame(annot_proto, frame_idx)
for box in boxes:
bbox = box[0:4].tolist()
if args.annot_root:
# with GT
if len(annot_boxes) == 0:
overlaps = 0.
else:
overlaps = iou([bbox], annot_boxes)
box_proto['boxes'].append(
{
"frame": frame_idx,
"bbox": box[0:4].tolist(),
"positive": True if np.any(overlaps>=0.5) else False
}
)
else:
# no GT
box_proto['boxes'].append(
{
"frame": frame_idx,
"bbox": box[0:4].tolist(),
}
)
    # save last proto (same '.box' extension as the per-video saves above)
    save_if_not_exist(box_proto,
        os.path.join(args.save_root, cur_vid_name+'.box'))
|
{
"content_hash": "4b393897ad2c65fee3e37efc11107538",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 94,
"avg_line_length": 36.742574257425744,
"alnum_prop": 0.511991376987335,
"repo_name": "myfavouritekk/TPN",
"id": "a83e676f28203568cbbaafa363af248222a9d015",
"size": "3734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/data/box_proto_from_proposals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "101524"
}
],
"symlink_target": ""
}
|
"""Pervasive/commonly-used utilities."""
import abc
import datetime as dt
import errno
import hashlib
import math
import re
import six
import socket
import ssl
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from oslo_log import log as logging
from oslo_utils import units
from pyasn1.codec.der import decoder as der_decoder
from pyasn1_modules import rfc2459
from pypowervm import const
from pypowervm.i18n import _
# Set up logging
LOG = logging.getLogger(__name__)
XPATH_DELIM = '/'
def dice_href(href, include_scheme_netloc=False, include_query=True,
include_fragment=True):
"""Parse, sanitize, and reassemble an href.
:param href: A full link string of the form
'<scheme>://<netloc>/<path>;<params>?<query>#<fragment>'.
This method also works if the <scheme>://<netloc> is omitted,
(but obviously include_scheme_netloc has no effect).
:param include_scheme_netloc: If True, the <scheme>://<netloc> portion is
included in the returned string. If False,
it is stripped.
:param include_query: If True, any ?<query> portion of the link will be
included in the return value.
:param include_fragment: If True, any #<fragment> portion of the link will
be included in the return value.
:return: A string representing the specified portion of the input link.
"""
parsed = urlparse.urlparse(href)
ret = ''
if include_scheme_netloc:
ret += parsed.scheme + '://' + parsed.netloc
ret += parsed.path
# trim trailing '/'s from path, if present
while ret.endswith('/'):
ret = ret[:-1]
if include_query and parsed.query:
ret += '?' + parsed.query
if include_fragment and parsed.fragment:
ret += '#' + parsed.fragment
return ret
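def _demo_dice_href():
    """A minimal usage sketch of dice_href; the href below is hypothetical.
    Trailing slashes are trimmed, and the keyword arguments control which
    pieces of the link survive.
    """
    href = 'http://host:12443/rest/api/uom/Foo/?group=ViosNetwork#frag'
    assert dice_href(href) == '/rest/api/uom/Foo?group=ViosNetwork#frag'
    assert dice_href(href, include_scheme_netloc=True, include_query=False,
                     include_fragment=False) == \
        'http://host:12443/rest/api/uom/Foo'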
def check_and_apply_xag(path, xag):
"""Validate extended attribute groups and produce the correct path.
If the existing path already has a group=* other than None, we use it.
However, if there is a proposed xag - including [] - it must match the
existing xag, or ValueError is raised.
Otherwise, we construct the group=* query param according to the
proposed xag list, as follows:
If xag is None, use group=None.
If xag is [] (the empty list), omit the group= query param entirely.
Otherwise the group= value is a sorted, comma-separated string of the
xag list. E.g. for xag=['b', 'c', 'a'], produce 'group=a,b,c'.
:param path: Input path or href, which may or may not contain a query
string, which may or may not contain a group=*. (Multiple
group=* not handled.) Values in the group=* must be
alpha sorted.
:param xag: Iterable of proposed extended attribute values to be included
in the query string of the resulting path.
:return: path, with at most one group=* in the query string. That
group= query param's value will be alpha sorted.
"""
parsed = urlparse.urlsplit(path)
# parse_qs returns { 'key': ['value'], ... }
qparms = urlparse.parse_qs(parsed.query) if parsed.query else {}
path_xag = qparms.pop('group', ['None'])[0]
if xag is None:
arg_xag = 'None'
else:
# Ensure we have a mutable copy to sort
xag = list(xag)
xag.sort()
arg_xag = ','.join(map(str, xag)) # may be ''
if path_xag == 'None':
# No existing xag. (Treat existing 'group=None' as if not there.)
# Use whatever was proposed (which may be implicit group=None or
# may be nothing).
path_xag = arg_xag
elif arg_xag != 'None':
# There was xag in the path already, as well as proposed xag (maybe
# empty). Previous xag must match proposed xag if specified
# (including empty).
if path_xag != arg_xag:
raise ValueError(_("Proposed extended attribute group "
"'%(arg_xag)s' doesn't match existing "
"extended attribute group '%(path_xag)s'") %
dict(arg_xag=arg_xag, path_xag=path_xag))
# else proposed xag is None, so use whatever was already in the path,
# Whatever we decided on, add it back to the query params if nonempty.
if path_xag != '':
qparms['group'] = [path_xag]
# Rebuild the querystring. Honor multiples (e.g. foo=bar&foo=baz).
# (We didn't expect/handle multiple group=*, but need to support it in
# other keys.)
qstr = '&'.join(['%s=%s' % (key, val)
for key, vals in qparms.items()
for val in vals])
return urlparse.urlunsplit((parsed.scheme, parsed.netloc, parsed.path,
qstr, parsed.fragment))
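def _demo_check_and_apply_xag():
    """A minimal sketch of the xag rules above, using a hypothetical path."""
    # A proposed xag list is sorted into a single group= query param.
    assert (check_and_apply_xag('/rest/api/uom/Foo', ['b', 'c', 'a']) ==
            '/rest/api/uom/Foo?group=a,b,c')
    # The empty list omits the group= param entirely.
    assert check_and_apply_xag('/rest/api/uom/Foo', []) == '/rest/api/uom/Foo'
    # xag=None yields an explicit group=None.
    assert (check_and_apply_xag('/rest/api/uom/Foo', None) ==
            '/rest/api/uom/Foo?group=None')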
def extend_basepath(href, add):
"""Extends the base path of an href, accounting for querystring/fragment.
For example, extend_basepath('http://server:1234/foo?a=b&c=d#frag', '/bar')
=> 'http://server:1234/foo/bar?a=b&c=d#frag'
:param href: Path or href to augment. Scheme, netloc, query string, and
fragment are allowed but not required.
:param add: String to add onto the base path of the href. Must not contain
unescaped special characters such as '?', '&', '#'.
:return: The augmented href.
"""
parsed = urlparse.urlsplit(href)
basepath = parsed.path + add
return urlparse.urlunsplit((parsed.scheme, parsed.netloc, basepath,
parsed.query, parsed.fragment))
def is_instance_path(href):
"""Does the path or href represent an instance (end with UUID)?
:param href: Path or href to check. Scheme, netloc, query string, and
fragment are allowed but not required.
:return: True if href's path ends with a UUID, indicating that it
represents an instance (as opposed to a Feed or some special URI
such as quick or search).
"""
path = dice_href(href, include_scheme_netloc=False, include_query=False,
include_fragment=False)
return re.match(const.UUID_REGEX_WORD, path.rsplit('/', 1)[1])
# TODO(IBM): fix (for MITM attacks) or remove (if using loopback only)
def validate_certificate(host, port, certpath, certext):
hostname = re.sub('[:.]', '_', host)
cert_file = '%s%s%s' % (certpath, hostname, certext)
try:
with open(cert_file, 'r') as f:
# Retrieve previously trusted certificate
trusted_cert = ssl.PEM_cert_to_DER_cert(f.read())
except Exception:
# found no trusted certificate
return False
# Read current certificate from host
conn = None
try:
# workaround for http://bugs.python.org/issue11811
# should go back to using get_server_certificate when fixed
# (Issue is resolved as of python 3.3. Workaround still needed for
# python 2.7 support.)
# rawcert = ssl.get_server_certificate((host, port))
# current_cert = ssl.PEM_cert_to_DER_cert(rawcert)
conn = socket.create_connection((host, port))
sock = ssl.wrap_socket(conn)
current_cert = sock.getpeercert(True)
except Exception:
# couldn't get certificate from host
return False
finally:
if conn is not None:
conn.shutdown(socket.SHUT_RDWR)
conn.close()
# Verify certificate finger prints are the same
if not (hashlib.sha1(trusted_cert).digest() ==
hashlib.sha1(current_cert).digest()):
return False
# check certificate expiration
try:
cert = der_decoder.decode(current_cert,
asn1Spec=rfc2459.Certificate())[0]
tbs = cert.getComponentByName('tbsCertificate')
validity = tbs.getComponentByName('validity')
not_after = validity.getComponentByName('notAfter').getComponent()
not_after = dt.datetime.strptime(str(not_after), '%y%m%d%H%M%SZ')
if dt.datetime.utcnow() >= not_after:
LOG.warning(_('Certificate has expired.'))
return False
except Exception:
LOG.exception('error parsing cert for expiration check')
return False
return True
def get_req_path_uuid(path, preserve_case=False, root=False):
"""Extract request target uuid of sanitized path.
:param path: Path or URI from which to extract the UUID.
:param preserve_case: If False, the returned UUID will be lowercased. If
True, it will be returned as it exists in the path.
:param root: If True, and path represents a CHILD entry, the UUID of the
ROOT is returned. Otherwise, the UUID of the target is
returned.
"""
ret = None
p = dice_href(path, include_query=False, include_fragment=False)
if '/' in p:
for maybe_id in p.rsplit('/', 3)[1::2]:
uuid_match = re.match(const.UUID_REGEX_WORD, maybe_id)
if uuid_match:
ret = maybe_id if preserve_case else maybe_id.lower()
if root:
# Want to return the first one. (If it's a ROOT path, this
# will also happen to be the last one.)
break
return ret
def get_uuid_xag_from_path(path):
uuid = get_req_path_uuid(path)
parsed = urlparse.urlsplit(path)
# parse_qs returns { 'key': ['value'], ... }
qparms = urlparse.parse_qs(parsed.query) if parsed.query else {}
return uuid.lower(), qparms.get('group', [None])[0]
def convert_bytes_to_gb(bytes_, low_value=.0001, dp=None):
"""Converts an integer of bytes to a decimal representation of gigabytes.
If the value is too low, will return the 'low_value'. This is useful
for converting a small number of bytes (ex. 50) into gigabytes. Rounding
may be required.
:param bytes_: The integer number of bytes.
:param low_value: The minimum value that should be returned. (Note: if dp
is also specified, the value returned may be rounded up
and thus be higher than low_value.)
:param dp: If specified, the value is rounded up to the specified number of
decimal places by round_gb_size_up. (Note: None and zero are
very different.)
:returns: The decimal value.
"""
gb_size = bytes_ / float(units.Gi)
if gb_size < low_value:
gb_size = low_value
if dp is not None:
gb_size = round_gb_size_up(gb_size, dp=dp)
return gb_size
def round_gb_size_up(gb_size, dp=2):
"""Rounds a GB disk size (as a decimal float) up to suit the platform.
Use this method to ensure that new vdisks, LUs, etc. are big enough, as the
platform generally rounds inputs to the nearest [whatever]. For example, a
disk of size 4.321GB may wind up at 4.32GB after rounding, possibly leaving
insufficient space for the image.
:param gb_size: A decimal float representing the GB size to be rounded.
:param dp: The number of decimal places to round (up) to. May be zero
(round to next highest integer) or negative, (e.g. -1 will round to the
next highest ten).
:return: A new decimal float which is greater than or equal to the input.
"""
shift = 10.0 ** dp
return float(math.ceil(gb_size * shift)) / shift
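def _demo_gb_rounding():
    """A minimal sketch of the two conversion helpers above; the byte and GB
    values are chosen purely for illustration."""
    assert round_gb_size_up(4.321) == 4.33           # default dp=2
    assert round_gb_size_up(4.321, dp=0) == 5.0      # next whole GB
    assert round_gb_size_up(4.321, dp=-1) == 10.0    # next ten GB
    # 50 bytes is far below a GB, so the low_value floor applies...
    assert convert_bytes_to_gb(50) == .0001
    # ...but rounding up (dp=2) can lift the result above the floor.
    assert convert_bytes_to_gb(50, dp=2) == 0.01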
def sanitize_mac_for_api(mac):
"""Converts a generalized mac address to one for the API.
Takes any standard mac (case-insensitive, with or without colons) and
formats it to uppercase and removes colons. This is the format for
the API.
:param mac: The input mac.
:returns: The sanitized mac.
"""
return mac.replace(':', '').upper()
def sanitize_bool_for_api(bool_val):
"""Sanitizes a boolean value for use in the API."""
return str(bool_val).lower()
def sanitize_float_for_api(float_val, precision=2):
"""Sanitizes a float value for use in the API."""
template = '%.' + str(precision) + 'f'
return template % float(float_val)
def sanitize_percent_for_api(float_val, precision=2):
"""Sanitizes a percent value for use in the API.
    :param float_val: A float where valid values are 0.0 <= x <= 1.0. For
                      example, with the default precision of 2, the input
                      0.02 produces the output '2.00%'.
:return: A string representation of the passed percentage.
"""
percent_float = float(float_val)
if percent_float < 0 or percent_float > 1:
raise ValueError('A float value 0 <= x <= 1.0 must be provided.')
percent_float *= 100
percent_float = sanitize_float_for_api(percent_float, precision)
return str(percent_float) + '%'
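def _demo_sanitize_percent():
    """A minimal sketch: ratios in [0.0, 1.0] become percent strings."""
    assert sanitize_percent_for_api(0.02) == '2.00%'
    assert sanitize_percent_for_api(0.5, precision=0) == '50%'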
def sanitize_wwpn_for_api(wwpn):
"""Updates the format of the WWPN to match the expected PowerVM format.
:param wwpn: The original WWPN.
:return: A WWPN of the format expected by the API.
"""
return wwpn.upper().replace(':', '')
def sanitize_file_name_for_api(name, prefix='', suffix='',
max_len=const.MaxLen.FILENAME_DEFAULT):
"""Generate a sanitized file name based on PowerVM's FileName.Pattern.
:param name: The base name to sanitize.
:param prefix: (Optional) A prefix to prepend to the 'name'. No delimiter
is added.
:param suffix: (Optional) A suffix to append to the 'name'. No delimiter
is added.
:param max_len: (Optional) The maximum allowable length of the final
sanitized string. Defaults to the API's defined length for
FileName.Pattern.
:return: A string scrubbed of all forbidden characters and trimmed for
length as necessary.
"""
def _scrub(in_name):
"""Returns in_name with illegal characters replaced with '_'."""
return re.sub(r'[^.0-9A-Z_a-z]', '_', in_name)
name, prefix, suffix = (_scrub(val) for val in (name, prefix, suffix))
base_len = max_len - len(prefix) - len(suffix)
if base_len <= 0:
raise ValueError(_("Prefix and suffix together may not be more than "
"%d characters."), max_len - 1)
name = name[:base_len]
ret = prefix + name + suffix
if not len(ret):
raise ValueError(_("Total length must be at least 1 character."))
return ret
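def _demo_sanitize_file_name():
    """A minimal sketch with a hypothetical image name, assuming the default
    max_len comfortably exceeds the example's length."""
    assert (sanitize_file_name_for_api('my image?.iso', prefix='boot_') ==
            'boot_my_image_.iso')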
def sanitize_partition_name_for_api(name, trunc_ok=True):
r"""Sanitize a string to be suitable for use as a partition name.
PowerVM's partition name restrictions are:
- Between 1 and 31 characters, inclusive;
- Containing ASCII characters between 0x20 (space) and 0x7E (~), inclusive,
except r()\<>*$&?|[]'"`
:param name: The name to scrub. Invalid characters will be replaced with
'_'.
:param trunc_ok: If True, and name exceeds 31 characters, it is truncated.
If False, and name exceeds 31 characters, ValueError is
raised.
:return: The scrubbed string.
    :raise ValueError: If name is None or zero length; or if it exceeds length
                       31 and trunc_ok=False.
"""
max_len = 31
if not name:
raise ValueError(_("The name parameter must be at least one character "
"long."))
    if not trunc_ok and len(name) > max_len:
        raise ValueError(_("The name parameter must not exceed %d characters "
                           "when trunc_ok is False.") % max_len)
return re.sub(r'[^- !#%+,./0-9:;=@A-Z^_a-z{}]', '_', name)[:max_len]
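def _demo_sanitize_partition_name():
    """A minimal sketch: forbidden characters become underscores, and long
    names are truncated when trunc_ok is True (the default)."""
    assert sanitize_partition_name_for_api('a$b?c') == 'a_b_c'
    assert len(sanitize_partition_name_for_api('x' * 40)) == 31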
def find_equivalent(elem, find_list):
"""Returns the element from the list that is equal to the one passed in.
For remove operations and what not, the exact object may need to be
provided. This method will find the functionally equivalent element
from the list.
:param elem: The original element.
:param find_list: The list to search through.
:returns: An element from the that is functionally equivalent (based on
__eq__). If it does not exist, None is returned.
"""
for find_elem in find_list:
if find_elem == elem:
return find_elem
return None
def find_wrapper(haystack, needle_uuid):
"""Finds the corresponding wrapper from a list given the UUID.
:param haystack: A list of wrappers. Usually generated from a 'feed' that
has been loaded via the wrapper's wrap(response) method.
:param needle_uuid: The UUID of the object to find in the list.
:return: The corresponding wrapper for that UUID. If not found, None.
"""
for wrapper in haystack:
if wrapper.uuid == needle_uuid:
return wrapper
return None
def xpath(*toks):
"""Constructs an XPath out of the passed-in string components."""
return XPATH_DELIM.join(toks)
def part_id_by_loc_code(loc_code):
"""Get a partition short ID for a provided virtual device location code.
All location codes on a virtual device are of the form:
<MachineType>.<Model>.<Serial>-V<PartID>-C<SlotNumber>
:return: An int of the associated partition short ID.
"""
id_match = re.search('.*-V(.+?)-.*', loc_code)
return int(id_match.group(1)) if id_match else None
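def _demo_part_id_by_loc_code():
    """A minimal sketch using a made-up location code that follows the
    documented <MachineType>.<Model>.<Serial>-V<PartID>-C<SlotNumber> form."""
    assert part_id_by_loc_code('U8247.22L.212A91A-V5-C2') == 5
    assert part_id_by_loc_code('no-virtual-slot-here') is None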
def xag_attrs(xagstr, base=const.DEFAULT_SCHEMA_ATTR):
"""Produce XML attributes for a property using extended attribute groups.
:param xagstr: Extended attribute group name (from pypowervm.const.XAG).
:param base: The dict of attributes to which to add the extended attribute
group. Usually one of the pypowervm.const values near
DEFAULT_SCHEMA_ATTR (the default).
:return: Dict of XML attributes suitable for the 'attrib' kwarg of a
(pypowervm.entities or etree) Element constructor.
"""
return dict(base, group=xagstr) if xagstr else base
def my_partition_id():
"""Return the short ID (not UUID) of the current partition, as an int."""
with open('/proc/ppc64/lparcfg') as lparcfg:
for line in lparcfg:
if line.startswith('partition_id='):
return int(line.split('=')[1].rstrip())
def parent_spec(parent, parent_type, parent_uuid):
"""Produce a canonical parent type and UUID suitable for read().
:param parent: EntryWrapper representing the parent. If specified,
parent_type and parent_uuid are ignored.
:param parent_type: EntryWrapper class or schema_type string representing
the schema type of the parent.
:param parent_uuid: String UUID of the parent.
:return parent_type: String schema type of the parent. The parent_type and
parent_uuid returns are both None or both valid
strings.
:return parent_uuid: String UUID of the parent. The parent_type and
parent_uuid returns are both None or both valid
strings.
:raise ValueError: If parent is None and parent_type xor parent_uuid is
specified.
"""
if all(param is None for param in (parent, parent_type, parent_uuid)):
return None, None
if parent is not None:
return parent.schema_type, parent.uuid
if any(param is None for param in (parent_type, parent_uuid)):
# parent_type xor parent_uuid specified
raise ValueError(_("Developer error: partial parent specification."))
# Allow either string or class for parent_type
if hasattr(parent_type, 'schema_type'):
parent_type = parent_type.schema_type
elif type(parent_type) is not str:
raise ValueError(_("Developer error: parent_type must be either a "
"string schema type or a Wrapper subclass."))
return parent_type, parent_uuid
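def _demo_parent_spec():
    """A minimal sketch using a stand-in class; any object with a
    schema_type attribute is accepted in place of a real Wrapper."""
    class _FakeWrapper(object):
        schema_type = 'ManagedSystem'
    assert (parent_spec(None, _FakeWrapper, 'some-uuid') ==
            ('ManagedSystem', 'some-uuid'))
    assert parent_spec(None, None, None) == (None, None)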
def retry_io_command(base_cmd, *argv):
"""PEP475: Retry syscalls if EINTR signal received.
https://www.python.org/dev/peps/pep-0475/
Certain system calls can be interrupted by signal 4 (EINTR) for no good
reason. Per PEP475, these signals should be ignored. This is implemented
by default at the lowest level in py3, but we have to account for it in
py2.
:param base_cmd: The syscall to wrap.
:param argv: Arguments to the syscall.
:return: The return value from invoking the syscall.
"""
while True:
try:
return base_cmd(*argv)
except EnvironmentError as enve:
if enve.errno != errno.EINTR:
raise
@six.add_metaclass(abc.ABCMeta)
class _AllowedList(object):
"""For REST fields taking 'ALL', 'NONE', or [list of values].
Subclasses should override parse_val and sanitize_for_api.
"""
ALL = 'ALL'
NONE = 'NONE'
_GOOD_STRINGS = (ALL, NONE)
@staticmethod
def parse_val(val):
"""Parse a single list value from string to appropriate native type.
:param val: A single value to parse.
:return: The converted value.
"""
# Default native type: str
return val
@staticmethod
def sanitize_for_api(val):
"""Convert a native value to the expected string format for REST.
:param val: The native value to convert.
:return: Sanitized string value suitable for the REST API.
:raise ValueError: If the string can't be converted.
"""
# Default: Just string-convert
return str(val)
@classmethod
def unmarshal(cls, rest_val):
"""Convert value from REST to a list of vals or an accepted string."""
rest_val = rest_val.strip()
if rest_val in cls._GOOD_STRINGS:
return rest_val
return [cls.parse_val(val) for val in rest_val.split()]
@classmethod
def const_or_list(cls, val):
"""Return one of the _GOOD_STRINGS, or the (sanitized) original list.
:param val: One of:
                    - A string representing one of the _GOOD_STRINGS
                      (case-insensitive).
- A list containing a single value as above.
- A list containing values appropriate to the subclass.
:return: One of:
- A string representing one of the _GOOD_STRINGS (in the
appropriate case).
- A list of the original values, validated and sanitized for
the REST API.
The objective is to be able to pass the return value directly
into a setter or bld method expecting the relevant type.
:raise ValueError: If the input could not be interpreted/sanitized as
appropriate to the subclass.
"""
ret = None
if isinstance(val, str) and val.upper() in cls._GOOD_STRINGS:
ret = val.upper()
elif isinstance(val, list):
if (len(val) == 1 and isinstance(val[0], str)
and val[0].upper() in cls._GOOD_STRINGS):
ret = val[0].upper()
else:
ret = [cls.sanitize_for_api(ival) for ival in val]
if ret is not None:
return ret
# Not a list, not a good value
raise ValueError(_("Invalid value '%(bad_val)s'. Expected one of "
"%(good_vals)s, or a list.") %
{'bad_val': val, 'good_vals': str(cls._GOOD_STRINGS)})
@classmethod
def marshal(cls, val):
"""Produce a string suitable for the REST API."""
val = cls.const_or_list(val)
return (' '.join([str(ival) for ival in val]) if isinstance(val, list)
else val)
class VLANList(_AllowedList):
"""For REST fields of type AllowedVLANIDs.Union."""
@staticmethod
def parse_val(val):
return int(val)
@staticmethod
def sanitize_for_api(val):
try:
return int(val)
except (ValueError, TypeError):
raise ValueError("Specify a list of VLAN integers or integer "
"strings; or 'ALL' for all VLANS or 'NONE' for "
"no VLANS.")
class MACList(_AllowedList):
"""For REST fields of type AllowedMACAddresses.Union."""
# Default parse_val is fine
@staticmethod
def sanitize_for_api(val):
return sanitize_mac_for_api(val)
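def _demo_allowed_lists():
    """A minimal sketch of the marshal/unmarshal round trip; the VLAN and
    MAC values are arbitrary examples."""
    assert VLANList.marshal([3, '2', 1]) == '3 2 1'
    assert VLANList.unmarshal('3 2 1') == [3, 2, 1]
    assert VLANList.const_or_list(['all']) == 'ALL'
    assert MACList.marshal(['aa:bb:cc:dd:ee:ff']) == 'AABBCCDDEEFF'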
|
{
"content_hash": "c6efd3f869f5be2ffc67ea13baf2a572",
"timestamp": "",
"source": "github",
"line_count": 620,
"max_line_length": 79,
"avg_line_length": 39.38548387096774,
"alnum_prop": 0.6161595478930341,
"repo_name": "powervm/pypowervm",
"id": "aff9a8b2b4eb7ddbeed8969609deed2d4b154b03",
"size": "25052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pypowervm/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2216148"
}
],
"symlink_target": ""
}
|
"""
Test the iosconfig Execution module.
"""
import textwrap
import salt.modules.iosconfig as iosconfig
# Import Salt modules
from salt.utils.odict import OrderedDict
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
class TestModulesIOSConfig(TestCase, LoaderModuleMockMixin):
running_config = textwrap.dedent(
"""\
interface GigabitEthernet1
ip address dhcp
negotiation auto
no mop enabled
!
interface GigabitEthernet2
ip address 172.20.0.1 255.255.255.0
shutdown
negotiation auto
!
interface GigabitEthernet3
no ip address
shutdown
negotiation auto
!"""
)
candidate_config = textwrap.dedent(
"""\
interface GigabitEthernet1
ip address dhcp
negotiation auto
no mop enabled
!
interface GigabitEthernet2
no ip address
shutdown
negotiation auto
!
interface GigabitEthernet3
no ip address
negotiation auto
!
router bgp 65000
bgp log-neighbor-changes
neighbor 1.1.1.1 remote-as 12345
!
!"""
)
merge_config = textwrap.dedent(
"""\
router bgp 65000
bgp log-neighbor-changes
neighbor 1.1.1.1 remote-as 12345
!
!
virtual-service csr_mgmt
!
ip forward-protocol nd
!"""
)
def setup_loader_modules(self):
return {}
def test_tree(self):
running_config_tree = OrderedDict(
[
(
"interface GigabitEthernet1",
OrderedDict(
[
("ip address dhcp", OrderedDict()),
("negotiation auto", OrderedDict()),
("no mop enabled", OrderedDict()),
]
),
),
(
"interface GigabitEthernet2",
OrderedDict(
[
("ip address 172.20.0.1 255.255.255.0", OrderedDict()),
("shutdown", OrderedDict()),
("negotiation auto", OrderedDict()),
]
),
),
(
"interface GigabitEthernet3",
OrderedDict(
[
("no ip address", OrderedDict()),
("shutdown", OrderedDict()),
("negotiation auto", OrderedDict()),
]
),
),
]
)
tree = iosconfig.tree(config=self.running_config)
self.assertEqual(tree, running_config_tree)
def test_clean(self):
clean_running_config = textwrap.dedent(
"""\
interface GigabitEthernet1
ip address dhcp
negotiation auto
no mop enabled
interface GigabitEthernet2
ip address 172.20.0.1 255.255.255.0
shutdown
negotiation auto
interface GigabitEthernet3
no ip address
shutdown
negotiation auto
"""
)
clean = iosconfig.clean(config=self.running_config)
self.assertEqual(clean, clean_running_config)
def test_merge_tree(self):
expected_merge_tree = OrderedDict(
[
(
"interface GigabitEthernet1",
OrderedDict(
[
("ip address dhcp", OrderedDict()),
("negotiation auto", OrderedDict()),
("no mop enabled", OrderedDict()),
]
),
),
(
"interface GigabitEthernet2",
OrderedDict(
[
("ip address 172.20.0.1 255.255.255.0", OrderedDict()),
("shutdown", OrderedDict()),
("negotiation auto", OrderedDict()),
]
),
),
(
"interface GigabitEthernet3",
OrderedDict(
[
("no ip address", OrderedDict()),
("shutdown", OrderedDict()),
("negotiation auto", OrderedDict()),
]
),
),
(
"router bgp 65000",
OrderedDict(
[
("bgp log-neighbor-changes", OrderedDict()),
("neighbor 1.1.1.1 remote-as 12345", OrderedDict()),
]
),
),
("virtual-service csr_mgmt", OrderedDict()),
("ip forward-protocol nd", OrderedDict()),
]
)
merge_tree = iosconfig.merge_tree(
initial_config=self.running_config, merge_config=self.merge_config
)
self.assertEqual(merge_tree, expected_merge_tree)
def test_merge_text(self):
        expected_merge_text = textwrap.dedent(
"""\
interface GigabitEthernet1
ip address dhcp
negotiation auto
no mop enabled
interface GigabitEthernet2
ip address 172.20.0.1 255.255.255.0
shutdown
negotiation auto
interface GigabitEthernet3
no ip address
shutdown
negotiation auto
router bgp 65000
bgp log-neighbor-changes
neighbor 1.1.1.1 remote-as 12345
virtual-service csr_mgmt
ip forward-protocol nd
"""
)
merge_text = iosconfig.merge_text(
initial_config=self.running_config, merge_config=self.merge_config
)
        self.assertEqual(merge_text, expected_merge_text)
def test_merge_diff(self):
expected_diff = textwrap.dedent(
"""\
@@ -10,3 +10,8 @@
no ip address
shutdown
negotiation auto
+router bgp 65000
+ bgp log-neighbor-changes
+ neighbor 1.1.1.1 remote-as 12345
+virtual-service csr_mgmt
+ip forward-protocol nd
"""
)
diff = iosconfig.merge_diff(
initial_config=self.running_config, merge_config=self.merge_config
)
self.assertEqual(diff.splitlines()[2:], expected_diff.splitlines())
def test_diff_text(self):
expected_diff = textwrap.dedent(
"""\
@@ -3,10 +3,12 @@
negotiation auto
no mop enabled
interface GigabitEthernet2
- ip address 172.20.0.1 255.255.255.0
+ no ip address
shutdown
negotiation auto
interface GigabitEthernet3
no ip address
- shutdown
negotiation auto
+router bgp 65000
+ bgp log-neighbor-changes
+ neighbor 1.1.1.1 remote-as 12345
"""
)
diff = iosconfig.diff_text(
candidate_config=self.candidate_config, running_config=self.running_config
)
self.assertEqual(diff.splitlines()[2:], expected_diff.splitlines())
|
{
"content_hash": "4834cfbb6e7c7945ecbd82bcab107687",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 86,
"avg_line_length": 30.932806324110672,
"alnum_prop": 0.4543828264758497,
"repo_name": "saltstack/salt",
"id": "d8b6dd1d69a32912285feebdc6ed7dcda91230ab",
"size": "7826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/modules/test_iosconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
from json import loads,dumps
import importlib as imp
import pip
import sys
from time import sleep
debug = True
# dependency Installer and installer
class Installer(object):
    def __init__(self, Uninstall=False):
        print("Fetching data...")
        jsonFile = "modules.json"
        self.data = self.fetchJson(jsonFile)
        self.missing = self.getMissingModules()
        if Uninstall:
            self.UninstallModules()
def setup(self):
self.updateJson()
self.missing = self.getMissingModules()
if (debug):
self.debugInfo()
if (self.missing != []):
proceed = str.lower(input("Would you like to install missing modules(y/n):%s"%str(self.missing)))
if (proceed == "y"):
for i in self.missing:
status = self.installModule(i)
if (status == 0):
print("Successfully installed module:"+str(i))
else:
print("Failure installing module:"+str(i))
self.updateJson()
self.missing = self.getMissingModules()
self.debugInfo()
input("Completed.\nPush enter to exit")
else:
input("No modules missing.\nPush enter to exit")
def updateJson(self):
for i in range(0,len(self.data["modules"])):
if(self.checkIfInstalled(self.data["modules"][i]["name"])):
self.updateModuleStatus(i,1)
if (debug):
print("Module found:"+self.data["modules"][i]["name"])
else:
self.updateModuleStatus(i,0)
self.missing.append(self.data["modules"][i]["name"])
if (debug):
print("Module missing:"+self.data["modules"][i]["name"])
    def checkIfInstalled(self, module):
        # importlib's find_loader returns None when the module is absent
        return imp.find_loader(module) is not None
def fetchJson(self,Jfile):
json = open(Jfile,"r").read()
parse = loads(json)
return parse
def getMissingModules(self):
temp = []
for i in range(0,len(self.data["modules"])):
if (self.data["modules"][i]["status"] == "0"):
temp.append(self.data["modules"][i]["name"])
return temp
def updateModuleStatus(self,moduleIndex,newValue):
self.data["modules"][moduleIndex]["status"] = str(newValue)
dump = dumps(self.data)
x = open("modules.json","w")
x.write(dump)
x.close()
    def installModule(self, inMod):
        try:
            # pip.main returns 0 on success, nonzero on failure
            return pip.main(["install", inMod])
        except Exception:
            print("PIP module required.\nPlease install externally")
def debugInfo(self):
for i in self.data["modules"]:
sys.stdout.write("Name:"+i["name"])
sys.stdout.write("\t\tStatus:"+i["status"]+"\n")
if self.missing != []:
print("Missing:"+str(self.missing))
# print(i["status:"])
def distributionReset(self):
for i in range(0,len(self.data["modules"])):
self.updateModuleStatus(i,0)
print("Reset completed")
    def UninstallModules(self):
        try:
            for i in range(0, len(self.data["modules"])):
                pip.main(["uninstall", self.data["modules"][i]["name"]])
        except Exception:
            print("Module not installed. Cannot be uninstalled")
        print("Modules uninstalled.\nRestart the program with no arguments to the installer to reinstall")
        sys.exit()
if __name__ == "__main__":
# Use argument "True" in constructor to uninstall modules.
# Comment out x.setup() during uninstall execution
x = Installer()
x.setup()
|
{
"content_hash": "78c7512245b3288f38a45722f3de3622",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 109,
"avg_line_length": 30.325581395348838,
"alnum_prop": 0.5465235173824131,
"repo_name": "joshsbirch/assistant",
"id": "014406beabb3f52525b48126ef27397b5f6625d1",
"size": "3912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Installer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7537"
}
],
"symlink_target": ""
}
|
from bson.objectid import ObjectId
from celery.task import Task
from apps.social.models import MSharedStory, MSocialProfile, MSocialServices, MSocialSubscription
from django.contrib.auth.models import User
from utils import log as logging
class PostToService(Task):
def run(self, shared_story_id, service):
try:
shared_story = MSharedStory.objects.get(id=shared_story_id)
shared_story.post_to_service(service)
except MSharedStory.DoesNotExist:
logging.debug(" ---> Shared story not found (%s). Can't post to: %s" % (shared_story_id, service))
class EmailNewFollower(Task):
def run(self, follower_user_id, followee_user_id):
user_profile = MSocialProfile.get_user(followee_user_id)
user_profile.send_email_for_new_follower(follower_user_id)
class EmailFollowRequest(Task):
def run(self, follower_user_id, followee_user_id):
user_profile = MSocialProfile.get_user(followee_user_id)
user_profile.send_email_for_follow_request(follower_user_id)
class EmailFirstShare(Task):
def run(self, user_id):
user = User.objects.get(pk=user_id)
user.profile.send_first_share_to_pytuneblog_email()
class EmailCommentReplies(Task):
def run(self, shared_story_id, reply_id):
shared_story = MSharedStory.objects.get(id=shared_story_id)
shared_story.send_emails_for_new_reply(reply_id)
class EmailStoryReshares(Task):
def run(self, shared_story_id):
shared_story = MSharedStory.objects.get(id=shared_story_id)
shared_story.send_email_for_reshare()
class SyncTwitterFriends(Task):
def run(self, user_id):
social_services = MSocialServices.objects.get(user_id=user_id)
social_services.sync_twitter_friends()
class SyncFacebookFriends(Task):
def run(self, user_id):
social_services = MSocialServices.objects.get(user_id=user_id)
social_services.sync_facebook_friends()
class SyncAppdotnetFriends(Task):
def run(self, user_id):
social_services = MSocialServices.objects.get(user_id=user_id)
social_services.sync_appdotnet_friends()
class SharePopularStories(Task):
name = 'share-popular-stories'
def run(self, **kwargs):
logging.debug(" ---> Sharing popular stories...")
MSharedStory.share_popular_stories(interactive=False)
class CleanSpam(Task):
name = 'clean-spam'
def run(self, **kwargs):
logging.debug(" ---> Finding spammers...")
MSharedStory.count_potential_spammers(destroy=True)
class UpdateRecalcForSubscription(Task):
def run(self, subscription_user_id, shared_story_id):
user = User.objects.get(pk=subscription_user_id)
socialsubs = MSocialSubscription.objects.filter(subscription_user_id=subscription_user_id)
try:
shared_story = MSharedStory.objects.get(id=ObjectId(shared_story_id))
except MSharedStory.DoesNotExist:
return
logging.debug(" ---> ~FM~SNFlipping unread recalc for ~SB%s~SN subscriptions to ~SB%s's pytuneblog~SN" % (
socialsubs.count(),
user.username
))
for socialsub in socialsubs:
socialsub.needs_unread_recalc = True
socialsub.save()
shared_story.publish_update_to_subscribers()
|
{
"content_hash": "53c3989d6e22152f52dbb778d0be957a",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 114,
"avg_line_length": 35.39795918367347,
"alnum_prop": 0.6555203228596137,
"repo_name": "Einsteinish/PyTune3",
"id": "f6727e96eb2672f0476c3abd62f01337e9df45cc",
"size": "3469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/social/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "569697"
},
{
"name": "CoffeeScript",
"bytes": "6745"
},
{
"name": "HTML",
"bytes": "281641"
},
{
"name": "JavaScript",
"bytes": "1547332"
},
{
"name": "Nginx",
"bytes": "897"
},
{
"name": "Objective-C",
"bytes": "4530"
},
{
"name": "Perl",
"bytes": "55598"
},
{
"name": "Python",
"bytes": "1549865"
},
{
"name": "R",
"bytes": "523"
},
{
"name": "Shell",
"bytes": "40404"
}
],
"symlink_target": ""
}
|
TRAIN_FILE = 'train.txt'
TRAIN_DB = 'train_db'
VAL_FILE = 'val.txt'
VAL_DB = 'val_db'
TEST_FILE = 'test.txt'
TEST_DB = 'test_db'
MEAN_FILE_IMAGE = 'mean.jpg'
# Classification jobs
LABELS_FILE = 'labels.txt'
DEFAULT_BATCH_SIZE = 16
# Caffe Protocol Buffers
MEAN_FILE_CAFFE = 'mean.binaryproto'
|
{
"content_hash": "c8a98985d4776be69db3c8754472ad9c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 36,
"avg_line_length": 18.5625,
"alnum_prop": 0.6868686868686869,
"repo_name": "RadicoLabs/DIGITS",
"id": "b9b8ec9c5575b0ec6fbbcc810243c61b468f9ce4",
"size": "378",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "digits/utils/constants.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "711"
},
{
"name": "HTML",
"bytes": "153301"
},
{
"name": "JavaScript",
"bytes": "107641"
},
{
"name": "Python",
"bytes": "465324"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
}
|
import numpy as _np
from DeepFried2 import floatX as _floatX
def data(T, N, nterms=2):
"""
`T` is the sequence length, `N` is the number of sequences and `nterms` is
the amount of terms that should be "active" in the stream.
Returned array is of shape (N, T, 2)
"""
X = _np.random.rand(N, T).astype(_floatX)
mask = _np.zeros((N, T), _floatX)
# Need to do this instead of just randint(0, T, (bs,nterms)) because we always need nterms distinct ones.
for i in range(N):
mask[i,_np.random.choice(T, size=nterms, replace=False)] = 1
y = _np.sum(X[mask > 0].reshape((-1,nterms)), axis=1)
X = _np.stack([X, mask], axis=-1)
return X, y[:,None]
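def _demo_data():
    """A minimal sanity-check sketch (not part of the dataset API): each
    target is the sum of the `nterms` inputs whose mask channel is 1."""
    X, y = data(T=20, N=4)
    assert X.shape == (4, 20, 2) and y.shape == (4, 1)
    for xi, yi in zip(X, y):
        assert abs(xi[xi[:, 1] > 0, 0].sum() - yi[0]) < 1e-4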
|
{
"content_hash": "192f16562ef1a227c13a56b1108e8ea3",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 109,
"avg_line_length": 33.19047619047619,
"alnum_prop": 0.6169296987087518,
"repo_name": "yobibyte/DeepFried2",
"id": "cb6c4d716775269c96becde3867e180193c3cfa8",
"size": "697",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "DeepFried2/datasets/sequence_sum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "113852"
}
],
"symlink_target": ""
}
|
"""Utilities related to glance image management for the PowerVM driver."""
from nova import utils
def stream_blockdev_to_glance(context, image_api, image_id, metadata, devpath):
"""Stream the entire contents of a block device to a glance image.
:param context: Nova security context.
:param image_api: Handle to the glance image API.
:param image_id: UUID of the prepared glance image.
:param metadata: Dictionary of metadata for the image.
:param devpath: String path to device file of block device to be uploaded,
e.g. "/dev/sde".
"""
# Make the device file owned by the current user for the duration of the
# operation.
with utils.temporary_chown(devpath), open(devpath, 'rb') as stream:
# Stream it. This is synchronous.
image_api.update(context, image_id, metadata, stream)
def generate_snapshot_metadata(context, image_api, image_id, instance):
"""Generate a metadata dictionary for an instance snapshot.
:param context: Nova security context.
:param image_api: Handle to the glance image API.
:param image_id: UUID of the prepared glance image.
:param instance: The Nova instance whose disk is to be snapshotted.
:return: A dict of metadata suitable for image_api.update.
"""
image = image_api.get(context, image_id)
# TODO(esberglu): Update this to v2 metadata
metadata = {
'name': image['name'],
'is_public': False,
'status': 'active',
'disk_format': 'raw',
'container_format': 'bare',
'properties': {
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance.project_id,
}
}
return metadata
|
{
"content_hash": "a7629b6e11763a1f52c00ca86e554ec4",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 79,
"avg_line_length": 37.170212765957444,
"alnum_prop": 0.6514024041213509,
"repo_name": "gooddata/openstack-nova",
"id": "1d8e497e85a4684d449c252da5a3748ef05b29a2",
"size": "2380",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/virt/powervm/image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3858"
},
{
"name": "HTML",
"bytes": "1386"
},
{
"name": "PHP",
"bytes": "43584"
},
{
"name": "Python",
"bytes": "23012372"
},
{
"name": "Shell",
"bytes": "32567"
},
{
"name": "Smarty",
"bytes": "429290"
}
],
"symlink_target": ""
}
|
"""Init for `cltk.lemmatize`."""
from .processes import *
|
{
"content_hash": "375bf374927c3511c376ba4dfebee353",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 32,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.6290322580645161,
"repo_name": "diyclassics/cltk",
"id": "22a16fa700282bb8d732affff67d4958eddaf850",
"size": "62",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/cltk/lemmatize/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "128319"
},
{
"name": "Makefile",
"bytes": "2296"
},
{
"name": "Python",
"bytes": "3335682"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
# Import the fastest implementation of
# pickle package. This should be removed
# when python3 come the unique supported
# python version
try:
import cPickle as pickle
except ImportError:
import pickle
import json
import msgpack
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import force_text
try:
from django.utils.encoding import force_bytes
except ImportError:
from django.utils.encoding import smart_bytes as force_bytes
class BaseSerializer(object):
def __init__(self, options):
pass
def dumps(self, value):
raise NotImplementedError
def loads(self, value):
raise NotImplementedError
class PickleSerializer(BaseSerializer):
def __init__(self, options):
self._pickle_version = -1
self.setup_pickle_version(options)
def setup_pickle_version(self, options):
if "PICKLE_VERSION" in options:
try:
                # read from the options dict passed in; self._options is
                # never assigned anywhere in this class
                self._pickle_version = int(options["PICKLE_VERSION"])
except (ValueError, TypeError):
raise ImproperlyConfigured("PICKLE_VERSION value must be an integer")
def dumps(self, value):
return pickle.dumps(value, self._pickle_version)
def loads(self, value):
return pickle.loads(force_bytes(value))
class JSONSerializer(BaseSerializer):
def dumps(self, value):
return force_bytes(json.dumps(value))
def loads(self, value):
return json.loads(force_text(value))
class MSGPackSerializer(BaseSerializer):
def dumps(self, value):
return msgpack.dumps(value)
def loads(self, value):
return msgpack.loads(value, encoding='utf-8')
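def _demo_round_trip():
    """A minimal sketch: every serializer should round-trip a simple value.
    (The msgpack case assumes a release that still accepts encoding=.)"""
    value = {'key': 'value', 'count': 3}
    for serializer_class in (PickleSerializer, JSONSerializer,
                             MSGPackSerializer):
        serializer = serializer_class({})
        assert serializer.loads(serializer.dumps(value)) == value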
|
{
"content_hash": "f42e5c069ba49cd94ea4df3edd4cdd24",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 85,
"avg_line_length": 26.044776119402986,
"alnum_prop": 0.6893982808022923,
"repo_name": "zl352773277/django-redis",
"id": "6c3a791b684f9dd3339b4781246065e160d6c0e8",
"size": "1770",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django_redis/serializers/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "79781"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.auditors.sqs
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Denver Janke <denverjanke@gmail.com>
"""
from security_monkey.auditors.resource_policy_auditor import ResourcePolicyAuditor
from security_monkey.watchers.sqs import SQS
class SQSAuditor(ResourcePolicyAuditor):
index = SQS.index
i_am_singular = SQS.i_am_singular
i_am_plural = SQS.i_am_plural
def __init__(self, accounts=None, debug=False):
super(SQSAuditor, self).__init__(accounts=accounts, debug=debug)
self.policy_keys = ['Policy']
|
{
"content_hash": "d19bdd95ed841f78628ecd24672406df",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 82,
"avg_line_length": 27.61904761904762,
"alnum_prop": 0.7034482758620689,
"repo_name": "markofu/security_monkey",
"id": "d409f19b2b6480a2e33b00846bd8c46a90f4f115",
"size": "1197",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "security_monkey/auditors/sqs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22116"
},
{
"name": "Dart",
"bytes": "86565"
},
{
"name": "HTML",
"bytes": "80747"
},
{
"name": "JavaScript",
"bytes": "8629"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "483322"
},
{
"name": "Shell",
"bytes": "19151"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, unicode_literals
from Foundation import NSPoint
__doc__="""
Create a center guideline between two guides.
"""
font = Glyphs.font
glyphs = font.glyphs
# get active layer
layer = font.selectedLayers[0]
def average(number1, number2):
return (number1 + number2) / 2.0
selected_guides = []
# access all guides and pick only the ones that are selected
for guide in layer.guides:
if guide.selected:
selected_guides.append(guide)
ang = 0.0
if len(selected_guides) == 2:
    x1 = selected_guides[0].x
    y1 = selected_guides[0].y
    x2 = selected_guides[1].x
    y2 = selected_guides[1].y
    x3 = average(x1, x2)
    y3 = average(y1, y2)
    x4 = x1 - x2
    y4 = y1 - y2
    if abs(x4) >= abs(y4):
        ang = 90.0
    ## add guideline only when exactly two guides were selected;
    ## otherwise x3/y3 would be undefined
    newGuide = GSGuideLine()
    newGuide.position = NSPoint(x3, y3)
    newGuide.angle = ang
    layer.guides.append(newGuide)
else:
    print("Select two guides first (and only two...)")
# Glyphs.showMacroWindow()
|
{
"content_hash": "aa124e52c030bf20fef9758f884481f2",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 65,
"avg_line_length": 17.87272727272727,
"alnum_prop": 0.6856561546286877,
"repo_name": "filipenegrao/glyphsapp-scripts",
"id": "db03d4c515c27f3b76471d8b98fcc6dfdf8b4174",
"size": "1067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "guides/create_center_guideline_between2guides.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "235"
},
{
"name": "Python",
"bytes": "135574"
}
],
"symlink_target": ""
}
|
from platform import machine as pm
import logging
import os
import winreg
liblogger = logging.getLogger('steamclean.libproviders')
def winreg_read(keypath, subkeyname):
""" Get provider installation path from reading registry data.
If unable to read registry information prompt user for input. """
system_type = pm()
regbase = 'HKEY_LOCAL_MACHINE\\'
regkey = None
# use architecture returned to evaluate appropriate registry key
if system_type == 'AMD64':
        regpath = 'SOFTWARE\\Wow6432Node\\' + keypath
regopts = (winreg.KEY_WOW64_64KEY + winreg.KEY_READ)
elif system_type == 'i386':
liblogger.info('32 bit operating system detected')
regpath = 'SOFTWARE\\' + keypath
regopts = winreg.KEY_READ
else:
liblogger.error('Unable to determine system architecture.')
raise ValueError('ERROR: Unable to determine system architecture.')
try:
regkey = winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, regpath, 0,
regopts)
# Save installation path value and close open registry key.
ipath = winreg.QueryValueEx(regkey, subkeyname)[0]
installpath = os.path.abspath(ipath.strip())
return installpath
except PermissionError:
        liblogger.error('Permission denied to read registry key %s',
                        regbase + regpath)
        liblogger.error('Run this script as administrator to resolve.')
        print('Permission denied to read registry data at %s.' % regpath)
return None
except FileNotFoundError:
fullkeypath = '\\'.join(s.strip('\\') for s in [regbase, regpath,
subkeyname])
        liblogger.warning('Registry key not found at %s', fullkeypath)
except:
liblogger.exception('Unknown exception raised')
return None
finally:
# Ensure registry key is closed after reading as applicable.
if regkey is not None:
liblogger.info('Registry data at %s used to determine ' +
'installation path', regbase + regpath)
winreg.CloseKey(regkey)
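def _demo_winreg_read():
    """A minimal, Windows-only usage sketch; the key path and value name
    below are illustrative stand-ins, not values this module defines."""
    installpath = winreg_read('Valve\\Steam', 'InstallPath')
    if installpath is not None:
        print('Provider installed at: %s' % installpath)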
|
{
"content_hash": "2d0e1ca0634b88b3281a8a8719d1d3ed",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 35.73770491803279,
"alnum_prop": 0.6293577981651376,
"repo_name": "evitalis/steamclean",
"id": "32f0e8253163403f7d4c531f5351ed3931004cca",
"size": "2274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "providers/libproviders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31257"
}
],
"symlink_target": ""
}
|
import mock
from st2common.models.db.trigger import TriggerDB
from st2common.transport.publishers import PoolPublisher
import st2reactor.container.utils as container_utils
from st2tests.base import CleanDbTestCase
MOCK_TRIGGER_TYPE = {}
MOCK_TRIGGER_TYPE['id'] = 'trigger-type-test.id'
MOCK_TRIGGER_TYPE['name'] = 'trigger-type-test.name'
MOCK_TRIGGER_TYPE['pack'] = 'dummy_pack_1'
MOCK_TRIGGER_TYPE['parameters_schema'] = {}
MOCK_TRIGGER_TYPE['payload_schema'] = {}
MOCK_TRIGGER = TriggerDB()
MOCK_TRIGGER.id = 'trigger-test.id'
MOCK_TRIGGER.name = 'trigger-test.name'
MOCK_TRIGGER.pack = 'dummy_pack_1'
MOCK_TRIGGER.parameters = {}
MOCK_TRIGGER.type = 'dummy_pack_1.trigger-type-test.name'
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class ContainerUtilsTest(CleanDbTestCase):
def test_create_trigger_instance_invalid_trigger(self):
trigger_instance = 'dummy_pack.footrigger'
instance = container_utils.create_trigger_instance(trigger_instance, {}, None)
self.assertTrue(instance is None)
|
{
"content_hash": "c22ad0a6e55175440c457d9f96060a47",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 86,
"avg_line_length": 36,
"alnum_prop": 0.7509578544061303,
"repo_name": "pinterb/st2",
"id": "11c4291c4e0ef83a08f54b137a081e896196dfe6",
"size": "1824",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "st2reactor/tests/unit/test_container_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "19687"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "1814798"
},
{
"name": "Shell",
"bytes": "7150"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
print("VAR is '{}'".format(os.environ["VAR"]))
|
{
"content_hash": "1a78f8d733185b1f0cc8c7d33ac2a53e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 82,
"avg_line_length": 23.833333333333332,
"alnum_prop": 0.7132867132867133,
"repo_name": "kageiit/buck",
"id": "eb18f87f2a6ef17d2166fa2c4f51f0de8e68836c",
"size": "143",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/com/facebook/buck/cli/testdata/run-command/cmd/echo_var.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1692"
},
{
"name": "C",
"bytes": "250514"
},
{
"name": "CSS",
"bytes": "56119"
},
{
"name": "Dockerfile",
"bytes": "2094"
},
{
"name": "HTML",
"bytes": "11770"
},
{
"name": "Java",
"bytes": "33114896"
},
{
"name": "JavaScript",
"bytes": "931240"
},
{
"name": "Kotlin",
"bytes": "310039"
},
{
"name": "Lex",
"bytes": "14469"
},
{
"name": "Makefile",
"bytes": "1704"
},
{
"name": "PowerShell",
"bytes": "2154"
},
{
"name": "Python",
"bytes": "2152087"
},
{
"name": "Shell",
"bytes": "43626"
},
{
"name": "Smalltalk",
"bytes": "194"
},
{
"name": "Thrift",
"bytes": "18638"
}
],
"symlink_target": ""
}
|
"""OpenID 2.0 - Requesting Authentication
Ref: https://openid.net/specs/openid-authentication-2_0.html#requesting_authentication
"""
from urllib.parse import urlencode
from uuid import uuid4
from datetime import datetime, timezone
from requests import get
from .utils import create_return_to
class Authentication:
"""Authentication initialization
Note:
Based on OpenID specification
https://openid.net/specs/openid-authentication-2_0.html
Args:
mode
ns
identity
claimed_id
return_to
request_id
Attributes:
mode
ns
identity
claimed_id
return_to
request_id
"""
def __init__(self, mode=None, ns=None, identity=None,
claimed_id=None, return_to=None, request_id=None):
self.mode = mode or 'checkid_setup'
self.ns = ns or 'http://specs.openid.net/auth/2.0'
self.identity = identity or 'http://specs.openid.net/auth/2.0/' \
'identifier_select'
self.claimed_id = claimed_id or 'http://specs.openid.net/auth/2.0/' \
'identifier_select'
self.request_id = request_id or uuid4().hex
self.return_to = return_to or create_return_to(self.request_id)
def authenticate(self, where, request_id=None):
"""Process to authenticate a request based on few data
On this step, the most important information is the request_id.
This parameter will allow us to recover this transaction on
return url.
"""
        response = get(self.destination(where), allow_redirects=False)
        return response.headers['Location']
@property
def payload(self):
"""Prepare the OpenID payload to authenticate this request"""
return {
'openid.mode': self.mode,
'openid.ns': self.ns,
'openid.identity': self.identity,
'openid.claimed_id': self.claimed_id,
'openid.return_to': self.return_to,
}
def convert(self, payload):
"""Convert the OpenID payload on QueryString format"""
return urlencode(payload)
def destination(self, base):
"""Full destination URL to send the payload"""
return base + '?' + self.convert(self.payload)
@property
def evidence(self):
"""This function could be used to get an evidence about what requests
were sent.
Example:
{
'openid.claimed_id': 'http://specs.openid.net/auth/2.0/identifier_select',
'openid.identity': 'http://specs.openid.net/auth/2.0/identifier_select',
'openid.mode': 'checkid_setup',
'openid.ns': 'http://specs.openid.net/auth/2.0',
'openid.return_to': 'https://requestb.in/1e7ing31?request_id=07c52d8bb36c4412a4f7e133be9b08ee',
'request_id': '07c52d8bb36c4412a4f7e133be9b08ee',
'timestamp': datetime.datetime(2017, 8, 9, 12, 12, 36, 735736, tzinfo=datetime.timezone.utc)
}
"""
evidence = {}
evidence.update(self.payload)
evidence.update({'request_id': self.request_id})
evidence.update({'timestamp': datetime.now(timezone.utc)})
return evidence
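# A minimal usage sketch (illustrative, not part of the original module): the
# provider endpoint URL below is hypothetical, and create_return_to must be
# configured for your deployment.
#
#     auth = Authentication()
#     redirect_url = auth.authenticate('https://provider.example/openid')
#     # Redirect the user agent to `redirect_url` to continue the flow.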
|
{
"content_hash": "1b386fdf11fe8513f052e71a2ad66548",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 104,
"avg_line_length": 32.71287128712871,
"alnum_prop": 0.6083535108958837,
"repo_name": "mac-developer/openid-wargaming",
"id": "48192b791ad102c83ffcc3557765dee1749bfce8",
"size": "3304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openid_wargaming/authentication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "24394"
}
],
"symlink_target": ""
}
|
'''
This program demonstrates Laplace point/edge detection using
OpenCV function Laplacian()
It captures from the default camera (index 0).
Usage:
python laplace.py <ddepth> <smoothType> <sigma>
If no arguments given default arguments will be used.
Keyboard Shortcuts:
Press ESC to exit the program.
'''
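# Example invocation (illustrative values; 3 == cv.CV_16S):
#     python laplace.py 3 GAUSSIAN 2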
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
def main():
# Declare the variables we are going to use
ddepth = cv.CV_16S
smoothType = "MedianBlur"
sigma = 3
    if len(sys.argv) == 4:
        # Command-line values arrive as strings; ddepth and sigma are numeric.
        ddepth = int(sys.argv[1])
        smoothType = sys.argv[2]
        sigma = int(sys.argv[3])
# Taking input from the camera
    cap = cv.VideoCapture(0)
# Create Window and Trackbar
cv.namedWindow("Laplace of Image", cv.WINDOW_AUTOSIZE)
cv.createTrackbar("Kernel Size Bar", "Laplace of Image", sigma, 15, lambda x:x)
# Printing frame width, height and FPS
print("=="*40)
print("Frame Width: ", cap.get(cv.CAP_PROP_FRAME_WIDTH), "Frame Height: ", cap.get(cv.CAP_PROP_FRAME_HEIGHT), "FPS: ", cap.get(cv.CAP_PROP_FPS))
while True:
# Reading input from the camera
ret, frame = cap.read()
        if not ret:
print("Can't open camera/video stream")
break
# Taking input/position from the trackbar
sigma = cv.getTrackbarPos("Kernel Size Bar", "Laplace of Image")
        # Setting kernel size (bitwise OR with 1 forces an odd value)
        ksize = (sigma*5)|1
# Removing noise by blurring with a filter
if smoothType == "GAUSSIAN":
smoothed = cv.GaussianBlur(frame, (ksize, ksize), sigma, sigma)
if smoothType == "BLUR":
smoothed = cv.blur(frame, (ksize, ksize))
if smoothType == "MedianBlur":
smoothed = cv.medianBlur(frame, ksize)
# Apply Laplace function
        laplace = cv.Laplacian(smoothed, ddepth, ksize=5)
# Converting back to uint8
        result = cv.convertScaleAbs(laplace, alpha=(sigma+1)*0.25)
# Display Output
cv.imshow("Laplace of Image", result)
k = cv.waitKey(30)
if k == 27:
return
if __name__ == "__main__":
print(__doc__)
main()
cv.destroyAllWindows()
|
{
"content_hash": "f9aa30fe5044f8c30f9b8d1eef4313bb",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 148,
"avg_line_length": 34.11940298507463,
"alnum_prop": 0.6119860017497812,
"repo_name": "opencv/opencv",
"id": "f485e5741ce640651977bf541da935e90b75f477",
"size": "2309",
"binary": false,
"copies": "2",
"ref": "refs/heads/4.x",
"path": "samples/python/laplace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "1986"
},
{
"name": "Batchfile",
"bytes": "1498"
},
{
"name": "C",
"bytes": "1543870"
},
{
"name": "C++",
"bytes": "35975082"
},
{
"name": "CMake",
"bytes": "1010867"
},
{
"name": "Cuda",
"bytes": "333437"
},
{
"name": "Dockerfile",
"bytes": "309"
},
{
"name": "HTML",
"bytes": "40027"
},
{
"name": "Java",
"bytes": "774232"
},
{
"name": "JavaScript",
"bytes": "233673"
},
{
"name": "Kotlin",
"bytes": "5204"
},
{
"name": "Objective-C",
"bytes": "100731"
},
{
"name": "Objective-C++",
"bytes": "392600"
},
{
"name": "Perl",
"bytes": "15865"
},
{
"name": "PowerShell",
"bytes": "14591"
},
{
"name": "Prolog",
"bytes": "843"
},
{
"name": "Python",
"bytes": "1038154"
},
{
"name": "Shell",
"bytes": "22738"
},
{
"name": "Swift",
"bytes": "301765"
},
{
"name": "TeX",
"bytes": "3530"
}
],
"symlink_target": ""
}
|
import pkg_resources
###### Files ######
# Gene panel:
panel_file = "demo/panel_1.txt"
panelapp_file = "demo/panelapp_test_panel.json"
madeline_file = "demo/madeline.xml"
# Case info
ped_file = "demo/643594.ped"
load_file = "demo/643594.config.yaml"
cancer_load_file = "demo/cancer.load_config.yaml"
clinical_snv_file = "demo/643594.clinical.vcf.gz"
research_snv_file = "demo/643594.research.vcf.gz"
customannotation_snv_file = "demo/customannotations_one.vcf.gz"
vep_97_annotated_snv_file = "demo/vep97_annotated_clnsig_conservation_revel.vcf"
vep_104_annotated_snv_file = "demo/vep104_annotated.vcf"
manta_annotated_sv_cancer_file = "demo/manta_vep_94_annotated_sv_cancer_file.vcf.gz"
cancer_snv_file = "demo/cancer_test.vcf.gz"
ped_path = pkg_resources.resource_filename("scout", ped_file)
clinical_sv_file = "demo/643594.clinical.SV.vcf.gz"
research_sv_file = "demo/643594.research.SV.vcf.gz"
empty_sv_file = "demo/empty.clinical.SV.vcf.gz"
clinical_str_file = "demo/643594.clinical.str.stranger.vcf.gz"
panel_path = pkg_resources.resource_filename("scout", panel_file)
panelapp_panel_path = pkg_resources.resource_filename("scout", panelapp_file)
madeline_path = pkg_resources.resource_filename("scout", madeline_file)
load_path = pkg_resources.resource_filename("scout", load_file)
cancer_load_path = pkg_resources.resource_filename("scout", cancer_load_file)
clinical_snv_path = pkg_resources.resource_filename("scout", clinical_snv_file)
clinical_sv_path = pkg_resources.resource_filename("scout", clinical_sv_file)
clinical_str_path = pkg_resources.resource_filename("scout", clinical_str_file)
customannotation_snv_path = pkg_resources.resource_filename("scout", customannotation_snv_file)
vep_97_annotated_path = pkg_resources.resource_filename("scout", vep_97_annotated_snv_file)
vep_104_annotated_path = pkg_resources.resource_filename("scout", vep_104_annotated_snv_file)
research_snv_path = pkg_resources.resource_filename("scout", research_snv_file)
research_sv_path = pkg_resources.resource_filename("scout", research_sv_file)
cancer_snv_path = pkg_resources.resource_filename("scout", cancer_snv_file)
cancer_sv_path = pkg_resources.resource_filename("scout", manta_annotated_sv_cancer_file)
empty_sv_clinical_path = pkg_resources.resource_filename("scout", empty_sv_file)
delivery_report_file = "demo/delivery_report.html"
delivery_report_path = pkg_resources.resource_filename("scout", delivery_report_file)
cnv_report_file = "demo/cancer_cnv_report.pdf"
cnv_report_path = pkg_resources.resource_filename("scout", cnv_report_file)
coverage_qc_report_file = "demo/cancer_coverage_qc_report.html"
coverage_qc_report_path = pkg_resources.resource_filename("scout", coverage_qc_report_file)
gene_fusion_report_file = "demo/draw-fusions-example.pdf"
gene_fusion_report_path = pkg_resources.resource_filename("scout", gene_fusion_report_file)
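# For illustration: each *_path above is an absolute path inside the installed
# scout package, e.g. ped_path ends with 'scout/demo/643594.ped' (the prefix
# depends on where the package is installed).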
|
{
"content_hash": "28cb8f677152514429c861bdaf5d5c2e",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 95,
"avg_line_length": 47.85,
"alnum_prop": 0.7694183211424591,
"repo_name": "Clinical-Genomics/scout",
"id": "02155d956ef5e168f98955a331c1b03e620186b8",
"size": "2871",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scout/demo/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12516"
},
{
"name": "Dockerfile",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "911931"
},
{
"name": "JavaScript",
"bytes": "32692"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "2419990"
}
],
"symlink_target": ""
}
|
import os
def pytest_addoption(parser):
parser.addoption("--no-optional-skip", action="store_true",
help="don't skip any tests with optional dependencies")
def pytest_configure(config):
if config.getoption('no_optional_skip'):
from .tests import helpers
for attr in helpers.__dict__:
if attr.startswith('requires_'):
                # The following line replaces the decorator with a function
                # that does nothing, effectively disabling it.
setattr(helpers, attr, lambda f: f)
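                # For illustration (hypothetical attribute name):
                # helpers.requires_astropy is typically a skip decorator like
                #     requires_astropy = pytest.mark.skipif(not HAS_ASTROPY, ...)
                # so replacing it with `lambda f: f` leaves tests undecorated.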
def pytest_report_header(config):
from . import __version__
glue_version = "%20s:\t%s" % ("glue", __version__)
from ._deps import get_status
return os.linesep + glue_version + os.linesep + os.linesep + get_status()
|
{
"content_hash": "2bcd434767788d8998db2e6e6453ff0f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 36.40909090909091,
"alnum_prop": 0.6242197253433208,
"repo_name": "JudoWill/glue",
"id": "50313f5ce39bd78131c0dc8b800ecb49f10624d8",
"size": "801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glue/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "2352"
},
{
"name": "Python",
"bytes": "1387891"
},
{
"name": "Shell",
"bytes": "1968"
}
],
"symlink_target": ""
}
|
import curses
from selenium.common.exceptions import WebDriverException
from libs.javascript.javascriptscript import *
from libs.javascript.javascriptcommands import *
from libs.javascript.jswalker import *
class JavascriptScreen:
def __init__(self, screen, webdriver, curses_util, jsinjector):
self.version=0.1
self.screen = screen
self.driver = webdriver
self.curses_util = curses_util
self.jsinjector = jsinjector
self.commands = JavascriptCommands(self.driver, self.jsinjector)
self.jswalker = JSWalker(self.driver, self.jsinjector)
def show(self):
showscreen = True
while showscreen:
self.screen = self.curses_util.get_screen()
self.screen.addstr(2, 2, "Javascript Tools")
self.screen.addstr(4, 5, "1) Find URLS within Javascript Global Properties")
self.screen.addstr(5, 5, "2) Show Javascript functions of Document")
self.screen.addstr(6, 5, "3) Run all js functions without args")
            self.screen.addstr(7, 5, "4) Show Cookies accessible by Javascript")
self.screen.addstr(8, 5, "5) Walk Javascript Functions")
self.screen.addstr(22, 28, "PRESS M FOR MAIN MENU")
self.screen.refresh()
c = self.screen.getch()
if c == ord('M') or c == ord('m'):
showscreen=False
if c == ord('1'):
self.curses_util.close_screen()
self.commands.search_for_urls()
if c == ord('2'):
self.curses_util.close_screen()
self.commands.search_for_document_javascript_methods()
if c == ord('3'):
self.curses_util.close_screen()
self.commands.run_lone_javascript_functions()
if c == ord('4'):
self.curses_util.close_screen()
self.commands.show_cookies()
if c == ord('5'):
self.curses_util.close_screen()
self.jswalker.start_walk_tree()
#self.commands.walk_functions()
return
|
{
"content_hash": "acd5f4579ee208319addb10b6b8e1948",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 28.73015873015873,
"alnum_prop": 0.687292817679558,
"repo_name": "bugbound/webnuke",
"id": "810df3c4132f0648cd1eacd47e13a87350756eeb",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/javascript/javascriptmenu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "7488"
},
{
"name": "Python",
"bytes": "77507"
},
{
"name": "Shell",
"bytes": "420"
}
],
"symlink_target": ""
}
|
class VumiError(Exception):
pass
class InvalidMessage(VumiError):
pass
class InvalidMessageType(VumiError):
pass
class MissingMessageField(InvalidMessage):
pass
class InvalidMessageField(InvalidMessage):
pass
class DuplicateConnectorError(VumiError):
pass
class InvalidEndpoint(VumiError):
"""Raised when attempting to send a message to an invalid endpoint."""
class DispatcherError(VumiError):
"""Raised when an error is encounter while dispatching a message."""
# Re-export this for compatibility.
from confmodel.errors import ConfigError
ConfigError  # referenced so linters don't flag the import as unused
|
{
"content_hash": "c305203a85506ef8b16fdf81f8cca951",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 74,
"avg_line_length": 16.72222222222222,
"alnum_prop": 0.760797342192691,
"repo_name": "vishwaprakashmishra/xmatrix",
"id": "49d9e9d4b16af960173031a887bb2c8b7f5da9e9",
"size": "602",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vumi/errors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Erlang",
"bytes": "29735"
},
{
"name": "JavaScript",
"bytes": "5556"
},
{
"name": "Puppet",
"bytes": "2557"
},
{
"name": "Python",
"bytes": "2968329"
},
{
"name": "Shell",
"bytes": "3435"
}
],
"symlink_target": ""
}
|
import win32ui
import win32api
from pywin.mfc import docview, dialog, window
import win32con
import re
import glob
import os
from . import scriptutils
def getsubdirs(d):
dlist = []
flist = glob.glob(d+'\\*')
for f in flist:
if os.path.isdir(f):
dlist.append(f)
dlist = dlist + getsubdirs(f)
return dlist
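# (Functionally similar to collecting dirnames from os.walk(d); the explicit
# recursion is kept for compatibility with the original behaviour.)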
class dirpath:
    def __init__(self, dirstr, recurse=0):
        dp = dirstr.split(';')
dirs = {}
for d in dp:
if os.path.isdir(d):
d = d.lower()
if d not in dirs:
dirs[d] = None
if recurse:
subdirs = getsubdirs(d)
for sd in subdirs:
sd = sd.lower()
if sd not in dirs:
dirs[sd] = None
elif os.path.isfile(d):
pass
else:
x = None
if d in os.environ:
x = dirpath(os.environ[d])
elif d[:5] == 'HKEY_':
keystr = d.split('\\')
                    try:
                        root = getattr(win32con, keystr[0])
                    except AttributeError:
                        win32ui.MessageBox("Can't interpret registry key name '%s'" % keystr[0])
try:
subkey = '\\'.join(keystr[1:])
val = win32api.RegQueryValue(root, subkey)
if val:
x = dirpath(val)
else:
win32ui.MessageBox("Registry path '%s' did not return a path entry" % d)
except:
win32ui.MessageBox("Can't interpret registry key value: %s" % keystr[1:])
else:
win32ui.MessageBox("Directory '%s' not found" % d)
if x:
for xd in x:
if xd not in dirs:
dirs[xd] = None
if recurse:
subdirs = getsubdirs(xd)
for sd in subdirs:
sd = sd.lower()
if sd not in dirs:
dirs[sd] = None
self.dirs = []
for d in list(dirs.keys()):
self.dirs.append(d)
def __getitem__(self, key):
return self.dirs[key]
def __len__(self):
return len(self.dirs)
def __setitem__(self, key, value):
self.dirs[key] = value
def __delitem__(self, key):
del self.dirs[key]
def __getslice__(self, lo, hi):
return self.dirs[lo:hi]
def __setslice__(self, lo, hi, seq):
self.dirs[lo:hi] = seq
def __delslice__(self, lo, hi):
del self.dirs[lo:hi]
    def __add__(self, other):
        if isinstance(other, dirpath):
            return self.dirs + other.dirs
        if isinstance(other, list):
            return self.dirs + other
    def __radd__(self, other):
        if isinstance(other, dirpath):
            return other.dirs + self.dirs
        if isinstance(other, list):
            return other + self.dirs
# Group(1) is the filename, group(2) is the lineno.
#regexGrepResult=regex.compile("^\\([a-zA-Z]:.*\\)(\\([0-9]+\\))")
regexGrep=re.compile(r"^([a-zA-Z]:[^(]*)\(([0-9]+)\)")
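# For illustration, a result line such as
#   C:\src\foo.py(123) some matching text
# yields group(1) == 'C:\\src\\foo.py' and group(2) == '123'.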
#these are the atom numbers defined by Windows for basic dialog controls
BUTTON = 0x80
EDIT = 0x81
STATIC = 0x82
LISTBOX = 0x83
SCROLLBAR = 0x84
COMBOBOX = 0x85
class GrepTemplate(docview.RichEditDocTemplate):
def __init__(self):
docview.RichEditDocTemplate.__init__(self, win32ui.IDR_TEXTTYPE, GrepDocument, GrepFrame, GrepView)
self.SetDocStrings("\nGrep\nGrep\nGrep params (*.grep)\n.grep\n\n\n")
win32ui.GetApp().AddDocTemplate(self)
self.docparams = None
def MatchDocType(self, fileName, fileType):
doc = self.FindOpenDocument(fileName)
if doc: return doc
ext = os.path.splitext(fileName)[1].lower()
if ext =='.grep':
return win32ui.CDocTemplate_Confidence_yesAttemptNative
return win32ui.CDocTemplate_Confidence_noAttempt
def setParams(self, params):
self.docparams = params
def readParams(self):
tmp = self.docparams
self.docparams = None
return tmp
class GrepFrame(window.MDIChildWnd):
# The template and doc params will one day be removed.
def __init__(self, wnd = None):
window.MDIChildWnd.__init__(self, wnd)
class GrepDocument(docview.RichEditDoc):
def __init__(self, template):
docview.RichEditDoc.__init__(self, template)
self.dirpattern = ''
self.filpattern = ''
self.greppattern = ''
self.casesensitive = 1
self.recurse = 1
self.verbose = 0
def OnOpenDocument(self, fnm):
#this bizarre stuff with params is so right clicking in a result window
#and starting a new grep can communicate the default parameters to the
#new grep.
        try:
            with open(fnm, 'r') as f:
                params = f.read()
        except OSError:
            params = None
self.setInitParams(params)
return self.OnNewDocument()
def OnCloseDocument(self):
        try:
            win32ui.GetApp().DeleteIdleHandler(self.SearchFile)
        except Exception:
            pass
return self._obj_.OnCloseDocument()
def saveInitParams(self):
# Only save the flags, not the text boxes.
paramstr = "\t%s\t\t%d\t%d" % (self.filpattern, self.casesensitive, self.recurse)
win32ui.WriteProfileVal("Grep", "Params", paramstr)
def setInitParams(self, paramstr):
if paramstr is None:
paramstr = win32ui.GetProfileVal("Grep", "Params", '\t\t\t1\t0\t0')
params = paramstr.split('\t')
if len(params) < 3:
params = params + ['']*(3-len(params))
if len(params) < 6:
params = params + [0]*(6-len(params))
self.dirpattern = params[0]
self.filpattern = params[1]
self.greppattern = params[2]
self.casesensitive = int(params[3])
self.recurse = int(params[4])
self.verbose = int(params[5])
# setup some reasonable defaults.
if not self.dirpattern:
try:
editor=win32ui.GetMainFrame().MDIGetActive()[0].GetEditorView()
self.dirpattern=os.path.abspath(os.path.dirname(editor.GetDocument().GetPathName()))
except (AttributeError, win32ui.error):
self.dirpattern = os.getcwd()
if not self.filpattern:
self.filpattern = "*.py"
def OnNewDocument(self):
if self.dirpattern == '':
self.setInitParams(greptemplate.readParams())
d = GrepDialog(self.dirpattern, self.filpattern, self.greppattern, self.casesensitive, self.recurse, self.verbose)
if d.DoModal() == win32con.IDOK:
self.dirpattern = d['dirpattern']
self.filpattern = d['filpattern']
self.greppattern = d['greppattern']
self.casesensitive = d['casesensitive']
self.recurse = d['recursive']
self.verbose = d['verbose']
self.doSearch()
self.saveInitParams()
return 1
return 0 # cancelled - return zero to stop frame creation.
def doSearch(self):
self.dp = dirpath(self.dirpattern, self.recurse)
self.SetTitle("Grep for %s in %s" % (self.greppattern, self.filpattern))
#self.text = []
self.GetFirstView().Append('#Search '+self.dirpattern+'\n')
if self.verbose:
self.GetFirstView().Append('# ='+repr(self.dp.dirs)+'\n')
self.GetFirstView().Append('# Files '+self.filpattern+'\n')
self.GetFirstView().Append('# For '+self.greppattern+'\n')
self.fplist = self.filpattern.split(';')
if self.casesensitive:
self.pat = re.compile(self.greppattern)
else:
self.pat = re.compile(self.greppattern, re.IGNORECASE)
win32ui.SetStatusText("Searching. Please wait...", 0)
self.dpndx = self.fpndx = 0
self.fndx = -1
if not self.dp:
self.GetFirstView().Append("# ERROR: '%s' does not resolve to any search locations" % self.dirpattern)
self.SetModifiedFlag(0)
else:
self.flist = glob.glob(self.dp[0]+'\\'+self.fplist[0])
win32ui.GetApp().AddIdleHandler(self.SearchFile)
def SearchFile(self, handler, count):
self.fndx = self.fndx + 1
if self.fndx < len(self.flist):
f = self.flist[self.fndx]
if self.verbose:
self.GetFirstView().Append('# ..'+f+'\n')
# Directories may match the file type pattern, and files may be removed
# while grep is running
if os.path.isfile(f):
win32ui.SetStatusText("Searching "+f, 0)
                with open(f, 'r') as fh:
                    lines = fh.readlines()
for i in range(len(lines)):
line = lines[i]
if self.pat.search(line) != None:
self.GetFirstView().Append(f+'('+repr(i+1) + ') '+line)
else:
self.fndx = -1
self.fpndx = self.fpndx + 1
if self.fpndx < len(self.fplist):
self.flist = glob.glob(self.dp[self.dpndx] + '\\' + self.fplist[self.fpndx])
else:
self.fpndx = 0
self.dpndx = self.dpndx + 1
if self.dpndx < len(self.dp):
self.flist = glob.glob(self.dp[self.dpndx] + '\\' + self.fplist[self.fpndx])
else:
win32ui.SetStatusText("Search complete.", 0)
self.SetModifiedFlag(0) # default to not modified.
                    try:
                        win32ui.GetApp().DeleteIdleHandler(self.SearchFile)
                    except Exception:
                        pass
return 0
return 1
def GetParams(self):
return self.dirpattern+'\t'+self.filpattern+'\t'+self.greppattern+'\t'+repr(self.casesensitive)+'\t'+repr(self.recurse)+'\t'+repr(self.verbose)
def OnSaveDocument(self, filename):
        # print('OnSaveDocument() filename=', filename)
        txt = self.GetParams() + '\n'
        # print('writing', txt)
        with open(filename, "w") as savefile:
            savefile.write(txt)
self.SetModifiedFlag(0)
return 1
ID_OPEN_FILE = 0xe400
ID_GREP = 0xe401
ID_SAVERESULTS = 0x402
ID_TRYAGAIN = 0x403
class GrepView(docview.RichEditView):
def __init__(self, doc):
docview.RichEditView.__init__(self, doc)
self.SetWordWrap(win32ui.CRichEditView_WrapNone)
self.HookHandlers()
def OnInitialUpdate(self):
rc = self._obj_.OnInitialUpdate()
format = (-402653169, 0, 200, 0, 0, 0, 49, 'Courier New')
self.SetDefaultCharFormat(format)
return rc
def HookHandlers(self):
self.HookMessage(self.OnRClick, win32con.WM_RBUTTONDOWN)
self.HookCommand(self.OnCmdOpenFile, ID_OPEN_FILE)
self.HookCommand(self.OnCmdGrep, ID_GREP)
self.HookCommand(self.OnCmdSave, ID_SAVERESULTS)
self.HookCommand(self.OnTryAgain, ID_TRYAGAIN)
self.HookMessage(self.OnLDblClick,win32con.WM_LBUTTONDBLCLK)
def OnLDblClick(self,params):
line = self.GetLine()
regexGrepResult = regexGrep.match(line)
if regexGrepResult:
fname = regexGrepResult.group(1)
line = int(regexGrepResult.group(2))
scriptutils.JumpToDocument(fname, line)
            return 0 # don't pass on
return 1 # pass it on by default.
def OnRClick(self, params):
menu = win32ui.CreatePopupMenu()
flags=win32con.MF_STRING|win32con.MF_ENABLED
lineno = self._obj_.LineFromChar(-1) #selection or current line
line = self._obj_.GetLine(lineno)
regexGrepResult = regexGrep.match(line)
if regexGrepResult:
self.fnm = regexGrepResult.group(1)
self.lnnum = int(regexGrepResult.group(2))
menu.AppendMenu(flags, ID_OPEN_FILE, "&Open "+self.fnm)
menu.AppendMenu(win32con.MF_SEPARATOR)
menu.AppendMenu(flags, ID_TRYAGAIN, "&Try Again")
charstart, charend = self._obj_.GetSel()
if charstart != charend:
linestart = self._obj_.LineIndex(lineno)
self.sel = line[charstart-linestart:charend-linestart]
menu.AppendMenu(flags, ID_GREP, "&Grep for "+self.sel)
menu.AppendMenu(win32con.MF_SEPARATOR)
menu.AppendMenu(flags, win32ui.ID_EDIT_CUT, 'Cu&t')
menu.AppendMenu(flags, win32ui.ID_EDIT_COPY, '&Copy')
menu.AppendMenu(flags, win32ui.ID_EDIT_PASTE, '&Paste')
        menu.AppendMenu(flags, win32con.MF_SEPARATOR)
menu.AppendMenu(flags, win32ui.ID_EDIT_SELECT_ALL, '&Select all')
        menu.AppendMenu(flags, win32con.MF_SEPARATOR)
menu.AppendMenu(flags, ID_SAVERESULTS, 'Sa&ve results')
menu.TrackPopupMenu(params[5])
return 0
def OnCmdOpenFile(self, cmd, code):
doc = win32ui.GetApp().OpenDocumentFile(self.fnm)
if doc:
vw = doc.GetFirstView()
#hope you have an editor that implements GotoLine()!
try:
vw.GotoLine(int(self.lnnum))
            except Exception:
pass
return 0
def OnCmdGrep(self, cmd, code):
curparamsstr = self.GetDocument().GetParams()
params = curparamsstr.split('\t')
params[2] = self.sel
greptemplate.setParams('\t'.join(params))
greptemplate.OpenDocumentFile()
return 0
def OnTryAgain(self, cmd, code):
greptemplate.setParams(self.GetDocument().GetParams())
greptemplate.OpenDocumentFile()
return 0
def OnCmdSave(self, cmd, code):
flags = win32con.OFN_OVERWRITEPROMPT
dlg = win32ui.CreateFileDialog(0, None, None, flags, "Text Files (*.txt)|*.txt||", self)
dlg.SetOFNTitle("Save Results As")
if dlg.DoModal() == win32con.IDOK:
pn = dlg.GetPathName()
self._obj_.SaveTextFile(pn)
return 0
def Append(self, strng):
numlines = self.GetLineCount()
endpos = self.LineIndex(numlines-1) + len(self.GetLine(numlines-1))
self.SetSel(endpos, endpos)
self.ReplaceSel(strng)
class GrepDialog(dialog.Dialog):
def __init__(self, dp, fp, gp, cs, r, v):
style = win32con.DS_MODALFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT
CS = win32con.WS_CHILD | win32con.WS_VISIBLE
tmp = [ ["Grep", (0, 0, 210, 90), style, None, (8, "MS Sans Serif")], ]
tmp.append([STATIC, "Grep For:", -1, (7, 7, 50, 9), CS ])
tmp.append([EDIT, gp, 101, (52, 7, 144, 11), CS | win32con.WS_TABSTOP | win32con.ES_AUTOHSCROLL | win32con.WS_BORDER])
tmp.append([STATIC, "Directories:", -1, (7, 20, 50, 9), CS ])
tmp.append([EDIT, dp, 102, (52, 20, 128, 11), CS | win32con.WS_TABSTOP | win32con.ES_AUTOHSCROLL | win32con.WS_BORDER])
tmp.append([BUTTON, '...', 110, (182,20, 16, 11), CS | win32con.BS_PUSHBUTTON | win32con.WS_TABSTOP])
tmp.append([STATIC, "File types:", -1, (7, 33, 50, 9), CS ])
tmp.append([EDIT, fp, 103, (52, 33, 128, 11), CS | win32con.WS_TABSTOP | win32con.ES_AUTOHSCROLL | win32con.WS_BORDER ])
tmp.append([BUTTON, '...', 111, (182,33, 16, 11), CS | win32con.BS_PUSHBUTTON | win32con.WS_TABSTOP])
tmp.append([BUTTON,'Case sensitive', 104, (7, 45, 72, 9), CS | win32con.BS_AUTOCHECKBOX | win32con.BS_LEFTTEXT| win32con.WS_TABSTOP])
tmp.append([BUTTON,'Subdirectories', 105, (7, 56, 72, 9), CS | win32con.BS_AUTOCHECKBOX | win32con.BS_LEFTTEXT| win32con.WS_TABSTOP])
tmp.append([BUTTON,'Verbose', 106, (7, 67, 72, 9), CS | win32con.BS_AUTOCHECKBOX | win32con.BS_LEFTTEXT| win32con.WS_TABSTOP])
tmp.append([BUTTON,'OK', win32con.IDOK, (166,53, 32, 12), CS | win32con.BS_DEFPUSHBUTTON| win32con.WS_TABSTOP])
tmp.append([BUTTON,'Cancel', win32con.IDCANCEL, (166,67, 32, 12), CS | win32con.BS_PUSHBUTTON| win32con.WS_TABSTOP])
dialog.Dialog.__init__(self, tmp)
self.AddDDX(101,'greppattern')
self.AddDDX(102,'dirpattern')
self.AddDDX(103,'filpattern')
self.AddDDX(104,'casesensitive')
self.AddDDX(105,'recursive')
self.AddDDX(106,'verbose')
self._obj_.data['greppattern'] = gp
self._obj_.data['dirpattern'] = dp
self._obj_.data['filpattern'] = fp
self._obj_.data['casesensitive'] = cs
self._obj_.data['recursive'] = r
self._obj_.data['verbose'] = v
self.HookCommand(self.OnMoreDirectories, 110)
self.HookCommand(self.OnMoreFiles, 111)
def OnMoreDirectories(self, cmd, code):
self.getMore('Grep\\Directories', 'dirpattern')
def OnMoreFiles(self, cmd, code):
self.getMore('Grep\\File Types', 'filpattern')
def getMore(self, section, key):
self.UpdateData(1)
#get the items out of the ini file
ini = win32ui.GetProfileFileName()
secitems = win32api.GetProfileSection(section, ini)
items = []
for secitem in secitems:
items.append(secitem.split('=')[1])
dlg = GrepParamsDialog(items)
if dlg.DoModal() == win32con.IDOK:
itemstr = ';'.join(dlg.getItems())
self._obj_.data[key] = itemstr
#update the ini file with dlg.getNew()
i = 0
newitems = dlg.getNew()
if newitems:
items = items + newitems
for item in items:
win32api.WriteProfileVal(section, repr(i), item, ini)
i = i + 1
self.UpdateData(0)
def OnOK(self):
self.UpdateData(1)
for id, name in [(101,'greppattern'), (102,'dirpattern'), (103,'filpattern')]:
if not self[name]:
self.GetDlgItem(id).SetFocus()
win32api.MessageBeep()
win32ui.SetStatusText("Please enter a value")
return
self._obj_.OnOK()
class GrepParamsDialog(dialog.Dialog):
def __init__(self, items):
self.items = items
self.newitems = []
style = win32con.DS_MODALFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT
CS = win32con.WS_CHILD | win32con.WS_VISIBLE
tmp = [ ["Grep Parameters", (0, 0, 205, 100), style, None, (8, "MS Sans Serif")], ]
tmp.append([LISTBOX, '', 107, (7, 7, 150, 72), CS | win32con.LBS_MULTIPLESEL| win32con.LBS_STANDARD | win32con.LBS_HASSTRINGS | win32con.WS_TABSTOP | win32con.LBS_NOTIFY])
tmp.append([BUTTON,'OK', win32con.IDOK, (167, 7, 32, 12), CS | win32con.BS_DEFPUSHBUTTON| win32con.WS_TABSTOP])
tmp.append([BUTTON,'Cancel', win32con.IDCANCEL, (167,23, 32, 12), CS | win32con.BS_PUSHBUTTON| win32con.WS_TABSTOP])
tmp.append([STATIC,'New:', -1, (2, 83, 15, 12), CS])
tmp.append([EDIT, '', 108, (18, 83, 139, 12), CS | win32con.WS_TABSTOP | win32con.ES_AUTOHSCROLL | win32con.WS_BORDER])
tmp.append([BUTTON,'Add', 109, (167,83, 32, 12), CS | win32con.BS_PUSHBUTTON| win32con.WS_TABSTOP])
dialog.Dialog.__init__(self, tmp)
self.HookCommand(self.OnAddItem, 109)
self.HookCommand(self.OnListDoubleClick, 107)
def OnInitDialog(self):
lb = self.GetDlgItem(107)
for item in self.items:
lb.AddString(item)
return self._obj_.OnInitDialog()
def OnAddItem(self, cmd, code):
eb = self.GetDlgItem(108)
item = eb.GetLine(0)
self.newitems.append(item)
lb = self.GetDlgItem(107)
i = lb.AddString(item)
lb.SetSel(i, 1)
return 1
def OnListDoubleClick(self, cmd, code):
if code == win32con.LBN_DBLCLK:
self.OnOK()
return 1
def OnOK(self):
lb = self.GetDlgItem(107)
self.selections = lb.GetSelTextItems()
self._obj_.OnOK()
def getItems(self):
return self.selections
def getNew(self):
return self.newitems
try:
win32ui.GetApp().RemoveDocTemplate(greptemplate)
except NameError:
pass
greptemplate = GrepTemplate()
|
{
"content_hash": "bc5ad7e865a6b53ea6ee676151d4315f",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 195,
"avg_line_length": 34.34117647058824,
"alnum_prop": 0.6652392371816832,
"repo_name": "sserrot/champion_relationships",
"id": "feeacb6e11242681a1570680856f225ae59abfe8",
"size": "18658",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/pythonwin/pywin/framework/sgrepmdi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
}
|
"""Run interop (cross-language) tests in parallel."""
import argparse
import atexit
import dockerjob
import itertools
import jobset
import json
import multiprocessing
import os
import re
import report_utils
import subprocess
import sys
import tempfile
import time
import uuid
# Docker doesn't clean up the terminal state after itself, so restore echo on exit.
atexit.register(lambda: subprocess.call(['stty', 'echo']))
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
_DEFAULT_SERVER_PORT=8080
_SKIP_COMPRESSION = ['large_compressed_unary',
'server_compressed_streaming']
_SKIP_ADVANCED = ['custom_metadata', 'status_code_and_message',
'unimplemented_method']
_TEST_TIMEOUT = 3*60
class CXXLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = 'cxx'
def client_cmd(self, args):
return ['bins/opt/interop_client'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['bins/opt/interop_server', '--use_tls=true'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_ADVANCED
def unimplemented_test_cases_server(self):
return _SKIP_ADVANCED
def __str__(self):
return 'c++'
class CSharpLanguage:
def __init__(self):
self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug'
self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug'
self.safename = str(self)
def client_cmd(self, args):
return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['mono', 'Grpc.IntegrationTesting.Server.exe', '--use_tls=true'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'csharp'
class JavaLanguage:
def __init__(self):
self.client_cwd = '../grpc-java'
self.server_cwd = '../grpc-java'
self.safename = str(self)
def client_cmd(self, args):
return ['./run-test-client.sh'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['./run-test-server.sh', '--use_tls=true'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION
def __str__(self):
return 'java'
class GoLanguage:
def __init__(self):
# TODO: this relies on running inside docker
self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
self.safename = str(self)
def client_cmd(self, args):
return ['go', 'run', 'client.go'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['go', 'run', 'server.go', '--use_tls=true'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION
def __str__(self):
return 'go'
class Http2Client:
"""Represents the HTTP/2 Interop Test
This pretends to be a language in order to be built and run, but really it
isn't.
"""
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _TEST_CASES
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'http2'
class NodeLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['node', 'src/node/interop/interop_client.js'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['node', 'src/node/interop/interop_server.js', '--use_tls=true'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_COMPRESSION
def __str__(self):
return 'node'
class PHPLanguage:
def __init__(self):
self.client_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['src/php/bin/interop_client.sh'] + args
def cloud_to_prod_env(self):
return {}
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return []
def __str__(self):
return 'php'
class RubyLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return ['ruby', 'src/ruby/pb/test/client.rb'] + args
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return ['ruby', 'src/ruby/pb/test/server.rb', '--use_tls=true'] + args
def global_env(self):
return {}
def unimplemented_test_cases(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION
def unimplemented_test_cases_server(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION
def __str__(self):
return 'ruby'
class PythonLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = str(self)
def client_cmd(self, args):
return [
'tox -einterop_client --',
' '.join(args)
]
def cloud_to_prod_env(self):
return {}
def server_cmd(self, args):
return [
'tox -einterop_server --',
' '.join(args) + ' --use_tls=true'
]
def global_env(self):
return {'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)}
def unimplemented_test_cases(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION + ['jwt_token_creds',
'per_rpc_creds']
def unimplemented_test_cases_server(self):
return _SKIP_ADVANCED + _SKIP_COMPRESSION
def __str__(self):
return 'python'
_LANGUAGES = {
'c++' : CXXLanguage(),
'csharp' : CSharpLanguage(),
'go' : GoLanguage(),
'java' : JavaLanguage(),
'node' : NodeLanguage(),
'php' : PHPLanguage(),
'ruby' : RubyLanguage(),
'python' : PythonLanguage(),
}
# languages supported as cloud_to_cloud servers
_SERVERS = ['c++', 'node', 'csharp', 'java', 'go', 'ruby', 'python']
_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
'empty_stream', 'client_streaming', 'server_streaming',
'cancel_after_begin', 'cancel_after_first_response',
'timeout_on_sleeping_server', 'custom_metadata',
'status_code_and_message', 'unimplemented_method',
'large_compressed_unary', 'server_compressed_streaming']
_AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
'oauth2_auth_token', 'per_rpc_creds']
_HTTP2_TEST_CASES = ["tls", "framing"]
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
"""Wraps given cmdline array to create 'docker run' cmdline from it."""
docker_cmdline = ['docker', 'run', '-i', '--rm=true']
# turn environ into -e docker args
if environ:
for k,v in environ.iteritems():
docker_cmdline += ['-e', '%s=%s' % (k,v)]
# set working directory
workdir = DOCKER_WORKDIR_ROOT
if cwd:
workdir = os.path.join(workdir, cwd)
docker_cmdline += ['-w', workdir]
docker_cmdline += docker_args + [image] + cmdline
return docker_cmdline
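# For illustration: docker_run_cmdline(['echo', 'hi'], image='img', cwd='sub')
# returns ['docker', 'run', '-i', '--rm=true', '-w',
#          '/var/local/git/grpc/sub', 'img', 'echo', 'hi'].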
def bash_login_cmdline(cmdline):
"""Creates bash -l -c cmdline from args list."""
# Use login shell:
# * rvm and nvm require it
# * makes error messages clearer if executables are missing
return ['bash', '-l', '-c', ' '.join(cmdline)]
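# For illustration: bash_login_cmdline(['go', 'run', 'client.go']) returns
# ['bash', '-l', '-c', 'go run client.go'].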
def auth_options(language, test_case):
"""Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
language = str(language)
cmdargs = []
env = {}
# TODO(jtattermusch): this file path only works inside docker
key_filepath = '/root/service_account/stubbyCloudTestingTest-ee3fce360ac5.json'
oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
key_file_arg = '--service_account_key_file=%s' % key_filepath
default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
if language in ['csharp', 'node', 'php', 'python', 'ruby']:
env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
else:
cmdargs += [key_file_arg]
if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
cmdargs += [oauth_scope_arg]
if test_case == 'oauth2_auth_token' and language == 'c++':
# C++ oauth2 test uses GCE creds and thus needs to know the default account
cmdargs += [default_account_arg]
if test_case == 'compute_engine_creds':
cmdargs += [oauth_scope_arg, default_account_arg]
return (cmdargs, env)
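# For illustration: auth_options('python', 'per_rpc_creds') returns
# ([oauth_scope_arg], {'GOOGLE_APPLICATION_CREDENTIALS': key_filepath}),
# since Python uses the env var rather than the key-file flag.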
def _job_kill_handler(job):
if job._spec.container_name:
dockerjob.docker_kill(job._spec.container_name)
# When the job times out and we decide to kill it,
    # we need to wait a bit before restarting the job
    # to prevent "container name already in use" error.
    # TODO(jtattermusch): figure out a cleaner way to do this.
time.sleep(2)
def cloud_to_prod_jobspec(language, test_case, server_host_name,
server_host_detail, docker_image=None, auth=False):
"""Creates jobspec for cloud-to-prod interop test"""
container_name = None
cmdargs = [
'--server_host=%s' % server_host_detail[0],
'--server_host_override=%s' % server_host_detail[1],
'--server_port=443',
'--use_tls=true',
'--test_case=%s' % test_case]
environ = dict(language.cloud_to_prod_env(), **language.global_env())
if auth:
auth_cmdargs, auth_env = auth_options(language, test_case)
cmdargs += auth_cmdargs
environ.update(auth_env)
cmdline = bash_login_cmdline(language.client_cmd(cmdargs))
cwd = language.client_cwd
if docker_image:
container_name = dockerjob.random_name('interop_client_%s' %
language.safename)
cmdline = docker_run_cmdline(cmdline,
image=docker_image,
cwd=cwd,
environ=environ,
docker_args=['--net=host',
'--name', container_name])
cwd = None
environ = None
suite_name='cloud_to_prod_auth' if auth else 'cloud_to_prod'
test_job = jobset.JobSpec(
cmdline=cmdline,
cwd=cwd,
environ=environ,
shortname='%s:%s:%s:%s' % (suite_name, server_host_name, language,
test_case),
timeout_seconds=_TEST_TIMEOUT,
flake_retries=5 if args.allow_flakes else 0,
timeout_retries=2 if args.allow_flakes else 0,
kill_handler=_job_kill_handler)
test_job.container_name = container_name
return test_job
def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
server_port, docker_image=None):
"""Creates jobspec for cloud-to-cloud interop test"""
cmdline = bash_login_cmdline(language.client_cmd([
'--server_host_override=foo.test.google.fr',
'--use_tls=true',
'--use_test_ca=true',
'--test_case=%s' % test_case,
'--server_host=%s' % server_host,
'--server_port=%s' % server_port]))
cwd = language.client_cwd
  environ = language.global_env()
  container_name = None
  if docker_image:
container_name = dockerjob.random_name('interop_client_%s' % language.safename)
cmdline = docker_run_cmdline(cmdline,
image=docker_image,
environ=environ,
cwd=cwd,
docker_args=['--net=host',
'--name', container_name])
cwd = None
test_job = jobset.JobSpec(
cmdline=cmdline,
cwd=cwd,
environ=environ,
shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
test_case),
timeout_seconds=_TEST_TIMEOUT,
flake_retries=5 if args.allow_flakes else 0,
timeout_retries=2 if args.allow_flakes else 0,
kill_handler=_job_kill_handler)
test_job.container_name = container_name
return test_job
def server_jobspec(language, docker_image):
"""Create jobspec for running a server"""
container_name = dockerjob.random_name('interop_server_%s' % language.safename)
cmdline = bash_login_cmdline(
language.server_cmd(['--port=%s' % _DEFAULT_SERVER_PORT]))
environ = language.global_env()
docker_cmdline = docker_run_cmdline(cmdline,
image=docker_image,
cwd=language.server_cwd,
environ=environ,
docker_args=['-p', str(_DEFAULT_SERVER_PORT),
'--name', container_name])
server_job = jobset.JobSpec(
cmdline=docker_cmdline,
environ=environ,
shortname='interop_server_%s' % language,
timeout_seconds=30*60)
server_job.container_name = container_name
return server_job
def build_interop_image_jobspec(language, tag=None):
"""Creates jobspec for building interop docker image for a language"""
if not tag:
tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
env = {'INTEROP_IMAGE': tag,
'BASE_NAME': 'grpc_interop_%s' % language.safename}
if not args.travis:
env['TTY_FLAG'] = '-t'
# This env variable is used to get around the github rate limit
# error when running the PHP `composer install` command
host_file = '%s/.composer/auth.json' % os.environ['HOME']
if language.safename == 'php' and os.path.exists(host_file):
env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
'-v %s:/root/.composer/auth.json:ro' % host_file
build_job = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
environ=env,
shortname='build_docker_%s' % (language),
timeout_seconds=30*60)
build_job.tag = tag
return build_job
def aggregate_http2_results(stdout):
match = re.search(r'\{"cases[^\]]*\]\}', stdout)
if not match:
return None
results = json.loads(match.group(0))
skipped = 0
passed = 0
failed = 0
failed_cases = []
for case in results['cases']:
if case.get('skipped', False):
skipped += 1
else:
if case.get('passed', False):
passed += 1
else:
failed += 1
failed_cases.append(case.get('name', "NONAME"))
return {
'passed': passed,
'failed': failed,
'skipped': skipped,
'failed_cases': ', '.join(failed_cases),
    'percent': 1.0 * passed / max(passed + failed, 1)  # guard when every case was skipped
}
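# For illustration: stdout containing
#   {"cases": [{"name": "tls", "passed": true}, {"name": "framing", "skipped": true}]}
# yields {'passed': 1, 'failed': 0, 'skipped': 1, 'failed_cases': '', 'percent': 1.0}.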
# A dictionary of prod servers to test.
# Format: server_name: (server_host, server_host_override, errors_allowed)
# TODO(adelez): implement logic for errors_allowed where if the indicated tests
# fail, they don't impact the overall test result.
prod_servers = {
'default': ('grpc-test.sandbox.googleapis.com',
'grpc-test.sandbox.googleapis.com', False),
'gateway_v2': ('grpc-test2.sandbox.googleapis.com',
'grpc-test2.sandbox.googleapis.com', True),
'cloud_gateway': ('216.239.32.255', 'grpc-test.sandbox.googleapis.com',
False),
'cloud_gateway_v2': ('216.239.32.255', 'grpc-test2.sandbox.googleapis.com',
True),
'gateway_v4': ('grpc-test4.sandbox.googleapis.com',
'grpc-test4.sandbox.googleapis.com', True),
'cloud_gateway_v4': ('216.239.32.255', 'grpc-test4.sandbox.googleapis.com',
True),
}
argp = argparse.ArgumentParser(description='Run interop tests.')
argp.add_argument('-l', '--language',
choices=['all'] + sorted(_LANGUAGES),
nargs='+',
default=['all'],
help='Clients to run.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('--cloud_to_prod',
default=False,
action='store_const',
const=True,
help='Run cloud_to_prod tests.')
argp.add_argument('--cloud_to_prod_auth',
default=False,
action='store_const',
const=True,
help='Run cloud_to_prod_auth tests.')
argp.add_argument('--prod_servers',
choices=prod_servers.keys(),
default=['default'],
nargs='+',
help=('The servers to run cloud_to_prod and '
'cloud_to_prod_auth tests against.'))
argp.add_argument('-s', '--server',
choices=['all'] + sorted(_SERVERS),
action='append',
help='Run cloud_to_cloud servers in a separate docker ' +
'image. Servers can only be started automatically if ' +
'--use_docker option is enabled.',
default=[])
argp.add_argument('--override_server',
action='append',
type=lambda kv: kv.split('='),
help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
default=[])
argp.add_argument('-t', '--travis',
default=False,
action='store_const',
const=True)
argp.add_argument('--use_docker',
default=False,
action='store_const',
const=True,
help='Run all the interop tests under docker. That provides ' +
'additional isolation and prevents the need to install ' +
'language specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
default=False,
action='store_const',
const=True,
help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--http2_interop',
default=False,
action='store_const',
const=True,
help='Enable HTTP/2 interop tests')
args = argp.parse_args()
servers = set(s for s in itertools.chain.from_iterable(_SERVERS
if x == 'all' else [x]
for x in args.server))
if args.use_docker:
if not args.travis:
print 'Seen --use_docker flag, will run interop tests under docker.'
print
print 'IMPORTANT: The changes you are testing need to be locally committed'
print 'because only the committed changes in the current branch will be'
print 'copied to the docker environment.'
time.sleep(5)
if not args.use_docker and servers:
print 'Running interop servers is only supported with --use_docker option enabled.'
sys.exit(1)
languages = set(_LANGUAGES[l]
for l in itertools.chain.from_iterable(
_LANGUAGES.iterkeys() if x == 'all' else [x]
for x in args.language))
http2Interop = Http2Client() if args.http2_interop else None
docker_images={}
if args.use_docker:
# languages for which to build docker images
languages_to_build = set(_LANGUAGES[k] for k in set([str(l) for l in languages] +
[s for s in servers]))
if args.http2_interop:
languages_to_build.add(http2Interop)
build_jobs = []
for l in languages_to_build:
job = build_interop_image_jobspec(l)
docker_images[str(l)] = job.tag
build_jobs.append(job)
if build_jobs:
jobset.message('START', 'Building interop docker images.', do_newline=True)
num_failures, _ = jobset.run(
build_jobs, newline_on_success=True, maxjobs=args.jobs)
if num_failures == 0:
jobset.message('SUCCESS', 'All docker images built successfully.',
do_newline=True)
else:
jobset.message('FAILED', 'Failed to build interop docker images.',
do_newline=True)
for image in docker_images.itervalues():
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
# Start interop servers.
server_jobs={}
server_addresses={}
try:
for s in servers:
lang = str(s)
spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang))
job = dockerjob.DockerJob(spec)
server_jobs[lang] = job
server_addresses[lang] = ('localhost', job.mapped_port(_DEFAULT_SERVER_PORT))
jobs = []
if args.cloud_to_prod:
for server_host_name in args.prod_servers:
for language in languages:
for test_case in _TEST_CASES:
if not test_case in language.unimplemented_test_cases():
if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION:
test_job = cloud_to_prod_jobspec(
language, test_case, server_host_name,
prod_servers[server_host_name],
docker_image=docker_images.get(str(language)))
jobs.append(test_job)
if args.http2_interop:
for test_case in _HTTP2_TEST_CASES:
test_job = cloud_to_prod_jobspec(
http2Interop, test_case, server_host_name,
prod_servers[server_host_name],
docker_image=docker_images.get(str(http2Interop)))
jobs.append(test_job)
if args.cloud_to_prod_auth:
for server_host_name in args.prod_servers:
for language in languages:
for test_case in _AUTH_TEST_CASES:
if not test_case in language.unimplemented_test_cases():
test_job = cloud_to_prod_jobspec(
language, test_case, server_host_name,
prod_servers[server_host_name],
docker_image=docker_images.get(str(language)), auth=True)
jobs.append(test_job)
for server in args.override_server:
server_name = server[0]
(server_host, server_port) = server[1].split(':')
server_addresses[server_name] = (server_host, server_port)
for server_name, server_address in server_addresses.iteritems():
(server_host, server_port) = server_address
server_language = _LANGUAGES.get(server_name, None)
skip_server = [] # test cases unimplemented by server
if server_language:
skip_server = server_language.unimplemented_test_cases_server()
for language in languages:
for test_case in _TEST_CASES:
if not test_case in language.unimplemented_test_cases():
if not test_case in skip_server:
test_job = cloud_to_cloud_jobspec(language,
test_case,
server_name,
server_host,
server_port,
docker_image=docker_images.get(str(language)))
jobs.append(test_job)
if args.http2_interop:
for test_case in _HTTP2_TEST_CASES:
if server_name == "go":
# TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
continue
test_job = cloud_to_cloud_jobspec(http2Interop,
test_case,
server_name,
server_host,
server_port,
docker_image=docker_images.get(str(http2Interop)))
jobs.append(test_job)
if not jobs:
print 'No jobs to run.'
for image in docker_images.itervalues():
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
num_failures, resultset = jobset.run(jobs, newline_on_success=True,
maxjobs=args.jobs)
if num_failures:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
else:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
report_utils.render_junit_xml_report(resultset, 'report.xml')
for name, job in resultset.iteritems():
if "http2" in name:
job[0].http2results = aggregate_http2_results(job[0].message)
report_utils.render_interop_html_report(
set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
_HTTP2_TEST_CASES, resultset, num_failures,
args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers,
args.http2_interop)
finally:
# Check if servers are still running.
for server, job in server_jobs.iteritems():
if not job.is_running():
print 'Server "%s" has exited prematurely.' % server
dockerjob.finish_jobs([j for j in server_jobs.itervalues()])
for image in docker_images.itervalues():
print 'Removing docker image %s' % image
dockerjob.remove_image(image)
|
{
"content_hash": "1f00d60a7bcec811382b6caa685b0d40",
"timestamp": "",
"source": "github",
"line_count": 794,
"max_line_length": 110,
"avg_line_length": 32.60705289672544,
"alnum_prop": 0.5969100038624952,
"repo_name": "shishaochen/TensorFlow-0.8-Win",
"id": "edbdf05e2a24937b6a281fe0fe926ea0458a02d2",
"size": "27444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/grpc/tools/run_tests/run_interop_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "33878"
},
{
"name": "C",
"bytes": "1390259"
},
{
"name": "C#",
"bytes": "1900628"
},
{
"name": "C++",
"bytes": "28129535"
},
{
"name": "CMake",
"bytes": "417657"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "Emacs Lisp",
"bytes": "7809"
},
{
"name": "GCC Machine Description",
"bytes": "1"
},
{
"name": "Go",
"bytes": "8549"
},
{
"name": "Groff",
"bytes": "1272396"
},
{
"name": "HTML",
"bytes": "849000"
},
{
"name": "Java",
"bytes": "3139664"
},
{
"name": "JavaScript",
"bytes": "417956"
},
{
"name": "Jupyter Notebook",
"bytes": "1772913"
},
{
"name": "M4",
"bytes": "78386"
},
{
"name": "Makefile",
"bytes": "1177180"
},
{
"name": "Objective-C",
"bytes": "2580186"
},
{
"name": "Objective-C++",
"bytes": "2897"
},
{
"name": "PHP",
"bytes": "342"
},
{
"name": "Protocol Buffer",
"bytes": "924786"
},
{
"name": "Python",
"bytes": "8241830"
},
{
"name": "Ruby",
"bytes": "82233"
},
{
"name": "Shell",
"bytes": "1875702"
},
{
"name": "Swift",
"bytes": "20550"
},
{
"name": "TypeScript",
"bytes": "395532"
},
{
"name": "VimL",
"bytes": "3759"
}
],
"symlink_target": ""
}
|
from tkinter import *
root = Tk()
Label(root, text="Hello tasdik!").pack(fill=BOTH, expand=YES)
root.mainloop()
|
{
"content_hash": "1ec91f0286f024e30814d3526ca844e4",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 67,
"avg_line_length": 23.6,
"alnum_prop": 0.6779661016949152,
"repo_name": "prodicus/dabble",
"id": "3076f4e0142695f883f06dbfc7a9cc2137f28c7e",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tkinter/second.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "4089"
},
{
"name": "C",
"bytes": "167"
},
{
"name": "C++",
"bytes": "93199"
},
{
"name": "CSS",
"bytes": "15389"
},
{
"name": "HTML",
"bytes": "57073"
},
{
"name": "Java",
"bytes": "141841"
},
{
"name": "Jupyter Notebook",
"bytes": "526025"
},
{
"name": "PHP",
"bytes": "1512"
},
{
"name": "PLpgSQL",
"bytes": "4812"
},
{
"name": "Python",
"bytes": "14193"
},
{
"name": "Shell",
"bytes": "295"
}
],
"symlink_target": ""
}
|
from onadata.libs.permissions import get_object_users_with_permissions
from onadata.libs.permissions import OwnerRole
from onadata.libs.permissions import ROLES
def set_project_perms_to_xform(xform, project):
# allows us to still use xform.shared and xform.shared_data as before
# only switch if xform.shared is False
xform_is_shared = xform.shared or xform.shared_data
if not xform_is_shared and project.shared != xform.shared:
xform.shared = project.shared
xform.shared_data = project.shared
xform.save()
for perm in get_object_users_with_permissions(project):
user = perm['user']
role_name = perm['role']
role = ROLES.get(role_name)
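        # Note: ROLES.get returns None for an unknown role name; perms from
        # get_object_users_with_permissions are assumed to carry known roles.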
if user != xform.created_by:
role.add(user, xform)
else:
OwnerRole.add(user, xform)
|
{
"content_hash": "e1b836de74da17442a28dce6f330fdd7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 73,
"avg_line_length": 36.17391304347826,
"alnum_prop": 0.6754807692307693,
"repo_name": "jnordling/cabin",
"id": "b4be644e2179bc1ebb12794966fcc3c757a6078a",
"size": "832",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "onadata/libs/utils/project_utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "HTML",
"bytes": "248525"
},
{
"name": "JavaScript",
"bytes": "904742"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "2813604"
},
{
"name": "Shell",
"bytes": "14149"
}
],
"symlink_target": ""
}
|
from testutils import unittest, skip_before_python, skip_before_postgres
from testutils import ConnectingTestCase, skip_copy_if_green
import psycopg2
class ConnectTestCase(unittest.TestCase):
def setUp(self):
self.args = None
        def connect_stub(dsn, connection_factory=None, async=False):
            self.args = (dsn, connection_factory, async)
        self._connect_orig = psycopg2._connect
        psycopg2._connect = connect_stub
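        # The stub lets tests inspect the DSN string psycopg2.connect builds
        # from keyword arguments, without opening a real connection.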
def tearDown(self):
psycopg2._connect = self._connect_orig
def test_there_has_to_be_something(self):
self.assertRaises(TypeError, psycopg2.connect)
self.assertRaises(TypeError, psycopg2.connect,
connection_factory=lambda dsn, async=False: None)
self.assertRaises(TypeError, psycopg2.connect,
async=True)
def test_no_keywords(self):
psycopg2.connect('')
self.assertEqual(self.args[0], '')
self.assertEqual(self.args[1], None)
self.assertEqual(self.args[2], False)
def test_dsn(self):
psycopg2.connect('dbname=blah x=y')
self.assertEqual(self.args[0], 'dbname=blah x=y')
self.assertEqual(self.args[1], None)
self.assertEqual(self.args[2], False)
def test_supported_keywords(self):
psycopg2.connect(database='foo')
self.assertEqual(self.args[0], 'dbname=foo')
psycopg2.connect(user='postgres')
self.assertEqual(self.args[0], 'user=postgres')
psycopg2.connect(password='secret')
self.assertEqual(self.args[0], 'password=secret')
psycopg2.connect(port=5432)
self.assertEqual(self.args[0], 'port=5432')
psycopg2.connect(sslmode='require')
self.assertEqual(self.args[0], 'sslmode=require')
psycopg2.connect(database='foo',
user='postgres', password='secret', port=5432)
self.assert_('dbname=foo' in self.args[0])
self.assert_('user=postgres' in self.args[0])
self.assert_('password=secret' in self.args[0])
self.assert_('port=5432' in self.args[0])
self.assertEqual(len(self.args[0].split()), 4)
def test_generic_keywords(self):
psycopg2.connect(foo='bar')
self.assertEqual(self.args[0], 'foo=bar')
def test_factory(self):
def f(dsn, async=False):
pass
psycopg2.connect(database='foo', bar='baz', connection_factory=f)
self.assertEqual(self.args[0], 'dbname=foo bar=baz')
self.assertEqual(self.args[1], f)
self.assertEqual(self.args[2], False)
psycopg2.connect("dbname=foo bar=baz", connection_factory=f)
self.assertEqual(self.args[0], 'dbname=foo bar=baz')
self.assertEqual(self.args[1], f)
self.assertEqual(self.args[2], False)
def test_async(self):
psycopg2.connect(database='foo', bar='baz', async=1)
self.assertEqual(self.args[0], 'dbname=foo bar=baz')
self.assertEqual(self.args[1], None)
self.assert_(self.args[2])
psycopg2.connect("dbname=foo bar=baz", async=True)
self.assertEqual(self.args[0], 'dbname=foo bar=baz')
self.assertEqual(self.args[1], None)
self.assert_(self.args[2])
def test_empty_param(self):
psycopg2.connect(database='sony', password='')
self.assertEqual(self.args[0], "dbname=sony password=''")
def test_escape(self):
psycopg2.connect(database='hello world')
self.assertEqual(self.args[0], "dbname='hello world'")
psycopg2.connect(database=r'back\slash')
self.assertEqual(self.args[0], r"dbname=back\\slash")
psycopg2.connect(database="quo'te")
self.assertEqual(self.args[0], r"dbname=quo\'te")
psycopg2.connect(database="with\ttab")
self.assertEqual(self.args[0], "dbname='with\ttab'")
psycopg2.connect(database=r"\every thing'")
self.assertEqual(self.args[0], r"dbname='\\every thing\''")
def test_no_kwargs_swallow(self):
self.assertRaises(TypeError,
psycopg2.connect, 'dbname=foo', database='foo')
self.assertRaises(TypeError,
psycopg2.connect, 'dbname=foo', user='postgres')
self.assertRaises(TypeError,
psycopg2.connect, 'dbname=foo', no_such_param='meh')
class ExceptionsTestCase(ConnectingTestCase):
def test_attributes(self):
cur = self.conn.cursor()
try:
cur.execute("select * from nonexist")
except psycopg2.Error, exc:
e = exc
self.assertEqual(e.pgcode, '42P01')
self.assert_(e.pgerror)
self.assert_(e.cursor is cur)
def test_diagnostics_attributes(self):
cur = self.conn.cursor()
try:
cur.execute("select * from nonexist")
except psycopg2.Error, exc:
e = exc
diag = e.diag
self.assert_(isinstance(diag, psycopg2.extensions.Diagnostics))
for attr in [
'column_name', 'constraint_name', 'context', 'datatype_name',
'internal_position', 'internal_query', 'message_detail',
'message_hint', 'message_primary', 'schema_name', 'severity',
'source_file', 'source_function', 'source_line', 'sqlstate',
'statement_position', 'table_name', ]:
v = getattr(diag, attr)
if v is not None:
self.assert_(isinstance(v, str))
def test_diagnostics_values(self):
cur = self.conn.cursor()
try:
cur.execute("select * from nonexist")
except psycopg2.Error, exc:
e = exc
self.assertEqual(e.diag.sqlstate, '42P01')
self.assertEqual(e.diag.severity, 'ERROR')
def test_diagnostics_life(self):
import gc
from weakref import ref
def tmp():
cur = self.conn.cursor()
try:
cur.execute("select * from nonexist")
except psycopg2.Error, exc:
return cur, exc
cur, e = tmp()
diag = e.diag
w = ref(cur)
del e, cur
gc.collect()
assert(w() is not None)
self.assertEqual(diag.sqlstate, '42P01')
del diag
gc.collect(); gc.collect()
assert(w() is None)
@skip_copy_if_green
def test_diagnostics_copy(self):
from StringIO import StringIO
f = StringIO()
cur = self.conn.cursor()
try:
cur.copy_to(f, 'nonexist')
except psycopg2.Error, exc:
diag = exc.diag
self.assertEqual(diag.sqlstate, '42P01')
def test_diagnostics_independent(self):
cur = self.conn.cursor()
try:
cur.execute("l'acqua e' poca e 'a papera nun galleggia")
except Exception, exc:
diag1 = exc.diag
self.conn.rollback()
try:
cur.execute("select level from water where ducks > 1")
except psycopg2.Error, exc:
diag2 = exc.diag
self.assertEqual(diag1.sqlstate, '42601')
self.assertEqual(diag2.sqlstate, '42P01')
def test_diagnostics_from_commit(self):
cur = self.conn.cursor()
cur.execute("""
create temp table test_deferred (
data int primary key,
ref int references test_deferred (data)
deferrable initially deferred)
""")
cur.execute("insert into test_deferred values (1,2)")
try:
self.conn.commit()
except psycopg2.Error, exc:
e = exc
self.assertEqual(e.diag.sqlstate, '23503')
@skip_before_postgres(9, 3)
def test_9_3_diagnostics(self):
cur = self.conn.cursor()
cur.execute("""
create temp table test_exc (
data int constraint chk_eq1 check (data = 1)
)""")
try:
cur.execute("insert into test_exc values(2)")
except psycopg2.Error, exc:
e = exc
self.assertEqual(e.pgcode, '23514')
self.assertEqual(e.diag.schema_name[:7], "pg_temp")
self.assertEqual(e.diag.table_name, "test_exc")
self.assertEqual(e.diag.column_name, None)
self.assertEqual(e.diag.constraint_name, "chk_eq1")
self.assertEqual(e.diag.datatype_name, None)
@skip_before_python(2, 5)
def test_pickle(self):
import pickle
cur = self.conn.cursor()
try:
cur.execute("select * from nonexist")
except psycopg2.Error, exc:
e = exc
e1 = pickle.loads(pickle.dumps(e))
self.assertEqual(e.pgerror, e1.pgerror)
self.assertEqual(e.pgcode, e1.pgcode)
self.assert_(e1.cursor is None)
@skip_before_python(2, 5)
def test_pickle_connection_error(self):
# segfaults on psycopg 2.5.1 - see ticket #170
import pickle
try:
psycopg2.connect('dbname=nosuchdatabasemate')
except psycopg2.Error, exc:
e = exc
e1 = pickle.loads(pickle.dumps(e))
self.assertEqual(e.pgerror, e1.pgerror)
self.assertEqual(e.pgcode, e1.pgcode)
self.assert_(e1.cursor is None)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "43dcf9f19d51d8a1c8fe0e8e28db7832",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 77,
"avg_line_length": 33.44604316546763,
"alnum_prop": 0.592600559260056,
"repo_name": "poojavade/Genomics_Docker",
"id": "b2f5279dd4cd340ac53d5f45f7bfc94bd66010a4",
"size": "10326",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/psycopg2/tests/test_module.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "1265138"
},
{
"name": "C++",
"bytes": "4734960"
},
{
"name": "CSS",
"bytes": "17332"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "77173"
},
{
"name": "HTML",
"bytes": "395483"
},
{
"name": "Java",
"bytes": "9223"
},
{
"name": "JavaScript",
"bytes": "783663"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Lua",
"bytes": "28217"
},
{
"name": "Makefile",
"bytes": "77825"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "244796"
},
{
"name": "Python",
"bytes": "54562861"
},
{
"name": "R",
"bytes": "2568"
},
{
"name": "Shell",
"bytes": "40620"
},
{
"name": "Smarty",
"bytes": "21035"
},
{
"name": "TeX",
"bytes": "55310"
}
],
"symlink_target": ""
}
|
"""Dataset utilities and synthetic/reference datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.datasets import mnist
from tensorflow.contrib.learn.python.learn.datasets import synthetic
from tensorflow.contrib.learn.python.learn.datasets import text_datasets
# Export load_iris and load_boston.
load_iris = base.load_iris
load_boston = base.load_boston
# List of all available datasets.
# Note, currently they may return different types.
DATASETS = {
# Returns base.Dataset.
'iris': base.load_iris,
'boston': base.load_boston,
# Returns base.Datasets (train/validation/test sets).
'mnist': mnist.load_mnist,
'dbpedia': text_datasets.load_dbpedia,
}
# List of all synthetic datasets
SYNTHETIC = {
# All of these will return ['data', 'target'] -> base.Dataset
'circles': synthetic.circles,
'spirals': synthetic.spirals
}
def load_dataset(name, size='small', test_with_fake_data=False):
"""Loads dataset by name.
Args:
name: Name of the dataset to load.
size: Size of the dataset to load.
test_with_fake_data: If true, load with fake dataset.
Returns:
Features and labels for given dataset. Can be numpy or iterator.
Raises:
ValueError: if `name` is not found.
"""
if name not in DATASETS:
    raise ValueError('Unknown dataset name: %s' % name)
if name == 'dbpedia':
return DATASETS[name](size, test_with_fake_data)
else:
return DATASETS[name]()
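# Minimal usage sketch for `load_dataset` (illustrative only; assumes the iris
# data bundled with tensorflow.contrib.learn is available locally):
#
#   iris = load_dataset('iris')
#   features, labels = iris.data, iris.target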
def make_dataset(name, n_samples=100, noise=None, seed=42, *args, **kwargs):
"""Creates binary synthetic datasets
Args:
name: str, name of the dataset to generate
n_samples: int, number of datapoints to generate
noise: float or None, standard deviation of the Gaussian noise added
seed: int or None, seed for noise
Returns:
Shuffled features and labels for given synthetic dataset of type
`base.Dataset`
Raises:
ValueError: Raised if `name` not found
Note:
    - This is a generic synthetic data generator - individual generators might
      have more parameters! See the documentation of each generator.
    - Note that the `noise` parameter uses `numpy.random.normal` and depends
      on `numpy`'s seed.
  TODO:
    - Support multiclass datasets
    - Need shuffling routine. Currently synthetic datasets are reshuffled to
      avoid train/test correlation, but that hurts reproducibility.
"""
# seed = kwargs.pop('seed', None)
if name not in SYNTHETIC:
    raise ValueError('Synthetic dataset not found or not implemented: %s' % name)
else:
return SYNTHETIC[name](
n_samples=n_samples, noise=noise, seed=seed, *args, **kwargs)
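# Illustrative call of `make_dataset` (hypothetical parameter values; 'circles'
# is one of the SYNTHETIC keys above):
#
#   dataset = make_dataset('circles', n_samples=200, noise=0.1, seed=42)
#   features, labels = dataset.data, dataset.target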
|
{
"content_hash": "ace6ac22f941e04d01d4c88eb359c977",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 80,
"avg_line_length": 29.958333333333332,
"alnum_prop": 0.713490959666203,
"repo_name": "av8ramit/tensorflow",
"id": "7240b0de149051afa045a8113f9e9b212840c311",
"size": "3565",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/datasets/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9096"
},
{
"name": "C",
"bytes": "332331"
},
{
"name": "C++",
"bytes": "37144977"
},
{
"name": "CMake",
"bytes": "193247"
},
{
"name": "Go",
"bytes": "1061627"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "544069"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48122"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "1487"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "32711532"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "422931"
}
],
"symlink_target": ""
}
|
"""Configuration for Externality Mushrooms.
Externality mushrooms is an immediate feedback collective action problem and
social dilemma. Unlike the other sequential social dilemmas in this suite, there
is no delay between the time when an agent takes an antisocial (or prosocial)
action and when its effect is felt by all other players. Thus it is a
sequential social dilemma in the sense of Leibo et al. 2017, but not an
intertemporal social dilemma in the sense of Hughes et al. 2018.
Three types of mushrooms are spread around the map and can be consumed for a
reward. Eating a red mushroom gives a reward of 1 to the individual who
ate the mushroom. Eating a green mushroom gives a reward of 2 and it gets
divided equally among all individuals. Eating a blue mushroom gives a reward of
3 and it gets divided among the individuals except the individual who ate the
mushroom. Mushroom regrowth depends on the type of mushroom eaten by
individuals. Red mushrooms regrow with a probability of 0.25 when a mushroom of
any color is eaten. Green mushrooms regrow with a probability of 0.4 when a
green or blue mushroom is eaten. Blue mushrooms regrow with a probability of 0.6
when a blue mushroom is eaten. Each mushroom takes time to digest, and an
individual who eats a mushroom is frozen while digesting it. Red mushrooms
digest instantly; green and blue mushrooms take 10 and 15 steps to digest
respectively. In addition, unharvested mushrooms spoil (and get removed from
the game) after a period of time. Red, green and blue mushrooms spoil after
200, 100 and 75 time steps respectively.
References:
Leibo JZ, Zambaldi V, Lanctot M, Marecki J, Graepel T. Multi-agent Reinforcement
Learning in Sequential Social Dilemmas (2017). AAMAS.
Hughes E, Leibo JZ, Phillips MG, Tuyls K, Duenez-Guzman EA, Garcia Castaneda A,
Dunning I, Zhu T, McKee KR, Koster R, Roff H, Graepel T. Inequity aversion
improves cooperation in intertemporal social dilemmas (2018). NeurIPS.
"""
from typing import Any, Dict, Mapping, Sequence, Text
from ml_collections import config_dict
from meltingpot.python.utils.substrates import colors
from meltingpot.python.utils.substrates import game_object_utils
from meltingpot.python.utils.substrates import shapes
from meltingpot.python.utils.substrates import specs
PrefabConfig = game_object_utils.PrefabConfig
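# Worked example of the reward split described in the module docstring
# (illustrative only; this helper is not used by the substrate). With
# `num_players` players, a red mushroom pays 1 to the eater alone, a green
# mushroom splits 2 equally among everyone, and a blue mushroom splits 3
# among everyone except the eater.
def _example_mushroom_reward_split(color, num_players, eater):
  """Return a hypothetical per-player reward list for one eaten mushroom."""
  rewards = [0.0] * num_players
  if color == "red":
    rewards[eater] = 1.0
  elif color == "green":
    rewards = [2.0 / num_players] * num_players
  elif color == "blue":
    rewards = [3.0 / (num_players - 1)] * num_players
    rewards[eater] = 0.0  # the eater is excluded from the split
  return rewards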
_COMPASS = ["N", "E", "S", "W"]
MARKING_SPRITE = """
oxxxxxxo
xoxxxxox
xxoxxoxx
xxxooxxx
xxxooxxx
xxoxxoxx
xoxxxxox
oxxxxxxo
"""
NW_WALL_CORNER = {
"name": "nw_wall_corner",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "nw_wall_corner",
"stateConfigs": [{
"state": "nw_wall_corner",
"layer": "upperPhysical",
"sprite": "NwWallCorner",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["NwWallCorner"],
"spriteShapes": [shapes.FENCE_NW_CORNER],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
NE_WALL_CORNER = {
"name": "ne_wall_corner",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "ne_wall_corner",
"stateConfigs": [{
"state": "ne_wall_corner",
"layer": "upperPhysical",
"sprite": "NeWallCorner",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["NeWallCorner"],
"spriteShapes": [shapes.FENCE_NE_CORNER],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
NE_INNER_WALL_CORNER = {
"name": "ne_inner_wall_corner",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "ne_inner_wall_corner",
"stateConfigs": [{
"state": "ne_inner_wall_corner",
"layer": "upperPhysical",
"sprite": "ne_inner_wall_corner",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["ne_inner_wall_corner"],
"spriteShapes": [shapes.FENCE_INNER_NE_CORNER],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
NW_INNER_WALL_CORNER = {
"name": "nw_inner_wall_corner",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "nw_inner_wall_corner",
"stateConfigs": [{
"state": "nw_inner_wall_corner",
"layer": "upperPhysical",
"sprite": "nw_inner_wall_corner",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["nw_inner_wall_corner"],
"spriteShapes": [shapes.FENCE_INNER_NW_CORNER],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
SE_WALL_CORNER = {
"name": "se_wall_corner",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "se_wall_corner",
"stateConfigs": [{
"state": "se_wall_corner",
"layer": "upperPhysical",
"sprite": "SeWallCorner",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["SeWallCorner"],
"spriteShapes": [shapes.FENCE_SE_CORNER],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
SW_WALL_CORNER = {
"name": "sw_wall_corner",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "sw_wall_corner",
"stateConfigs": [{
"state": "sw_wall_corner",
"layer": "upperPhysical",
"sprite": "SwWallCorner",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["SwWallCorner"],
"spriteShapes": [shapes.FENCE_SW_CORNER],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
WALL_SHADOW_SW = {
"name": "wall_shadow_sw",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall_shadow_sw",
"stateConfigs": [{
"state": "wall_shadow_sw",
"layer": "upperPhysical",
"sprite": "wall_shadow_sw",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["wall_shadow_sw"],
"spriteShapes": [shapes.FENCE_SHADOW_SW],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
WALL_SHADOW_S = {
"name": "wall_shadow_s",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall_shadow_s",
"stateConfigs": [{
"state": "wall_shadow_s",
"layer": "upperPhysical",
"sprite": "wall_shadow_s",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["wall_shadow_s"],
"spriteShapes": [shapes.FENCE_SHADOW_S],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
WALL_SHADOW_SE = {
"name": "wall_shadow_se",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall_shadow_se",
"stateConfigs": [{
"state": "wall_shadow_se",
"layer": "upperPhysical",
"sprite": "wall_shadow_se",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["wall_shadow_se"],
"spriteShapes": [shapes.FENCE_SHADOW_SE],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
WALL_NORTH = {
"name": "wall_north",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall_north",
"stateConfigs": [{
"state": "wall_north",
"layer": "upperPhysical",
"sprite": "WallNorth",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["WallNorth"],
"spriteShapes": [shapes.FENCE_N],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
WALL_EAST = {
"name": "wall_east",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall_east",
"stateConfigs": [{
"state": "wall_east",
"layer": "upperPhysical",
"sprite": "WallEast",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["WallEast"],
"spriteShapes": [shapes.FENCE_E],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
WALL_SOUTH = {
"name": "wall_south",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall_south",
"stateConfigs": [{
"state": "wall_south",
"layer": "upperPhysical",
"sprite": "WallSouth",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["WallSouth"],
"spriteShapes": [shapes.FENCE_S],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
WALL_WEST = {
"name": "wall_west",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "wall_west",
"stateConfigs": [{
"state": "wall_west",
"layer": "upperPhysical",
"sprite": "WallWest",
}],
}
},
{"component": "Transform"},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["WallWest"],
"spriteShapes": [shapes.FENCE_W],
"palettes": [shapes.FENCE_PALETTE_BROWN],
"noRotates": [False]
}
},
{"component": "BeamBlocker", "kwargs": {"beamType": "zapHit"}},
]
}
def get_marking_palette(alpha: float) -> Mapping[str, Sequence[int]]:
alpha_uint8 = int(alpha * 255)
  assert 0 <= alpha_uint8 <= 255, "Color value out of range."
return {"x": shapes.ALPHA, "o": (0, 0, 0, alpha_uint8)}
DIRT = {
"name": "dirt",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "dirt",
"stateConfigs": [{
"state": "dirt",
"layer": "background",
"sprite": "Dirt",
}],
}
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["Dirt"],
"spriteShapes": [shapes.DIRT_PATTERN],
"palettes": [{
"x": (81, 70, 32, 255),
"X": (89, 77, 36, 255),
}],
"noRotates": [False]
}
},
{
"component": "Transform",
},
]
}
SPAWN_POINT = {
"name": "spawnPoint",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "spawnPoint",
"stateConfigs": [{
"state": "spawnPoint",
"layer": "logic",
"groups": ["spawnPoints"]
}],
}
},
{
"component": "Transform",
},
]
}
def create_mushroom(initial_state: Text = "wait"):
"""Create a mushroom prefab object."""
mushroom_prefab = {
"name": "mushroom",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": initial_state,
"stateConfigs": [
{
"state": "fullInternalityZeroExternality",
"layer": "lowerPhysical",
"sprite": "FullInternalityZeroExternality",
"groups": ["fullInternalityZeroExternality"],
},
{
"state": "halfInternalityHalfExternality",
"layer": "lowerPhysical",
"sprite": "HalfInternalityHalfExternality",
"groups": ["halfInternalityHalfExternality"],
},
{
"state": "zeroInternalityFullExternality",
"layer": "lowerPhysical",
"sprite": "ZeroInternalityFullExternality",
"groups": ["zeroInternalityFullExternality"],
},
{
"state": "negativeInternalityNegativeExternality",
"layer": "lowerPhysical",
"sprite": "NegativeInternalityNegativeExternality",
"groups": ["negativeInternalityNegativeExternality"],
},
{
"state": "wait",
},
],
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["FullInternalityZeroExternality",
"HalfInternalityHalfExternality",
"ZeroInternalityFullExternality",
"NegativeInternalityNegativeExternality"],
"spriteShapes": [shapes.MUSHROOM] * 4,
"palettes": [
shapes.MUSHROOM_RED_PALETTE,
shapes.MUSHROOM_GREEN_PALETTE,
shapes.MUSHROOM_BLUE_PALETTE,
shapes.MUSHROOM_ORANGE_PALETTE,
],
"noRotates": [True] * 4
}
},
{
"component": "MushroomEating",
"kwargs": {
"totalReward": {
"fullInternalityZeroExternality": 1,
"halfInternalityHalfExternality": 2,
"zeroInternalityFullExternality": 3,
"negativeInternalityNegativeExternality": -1.0,
},
"liveStates": ("fullInternalityZeroExternality",
"halfInternalityHalfExternality",
"zeroInternalityFullExternality",
"negativeInternalityNegativeExternality"),
"numSporesReleasedWhenEaten": {
"fullInternalityZeroExternality": 3,
"halfInternalityHalfExternality": 3,
"zeroInternalityFullExternality": 3,
"negativeInternalityNegativeExternality": 1,
},
"digestionTimes": {
"fullInternalityZeroExternality": 0,
"halfInternalityHalfExternality": 10,
"zeroInternalityFullExternality": 15,
"negativeInternalityNegativeExternality": 15,
},
"destroyOnEating": {
"negativeInternalityNegativeExternality": {
"typeToDestroy": "fullInternalityZeroExternality",
"percentToDestroy": 0.25},
},
},
},
{
"component": "MushroomGrowable",
"kwargs": {}
},
{
"component": "Destroyable",
"kwargs": {
"initialHealth": 1,
"waitState": "wait",
}
},
{
"component": "Perishable",
"kwargs": {
"waitState": "wait",
"delayPerState": {
"fullInternalityZeroExternality": 200,
"halfInternalityHalfExternality": 100,
"zeroInternalityFullExternality": 75,
"negativeInternalityNegativeExternality": 1e7,
}
}
},
]
}
return mushroom_prefab
# Primitive action components.
# pylint: disable=bad-whitespace
# pyformat: disable
NOOP = {"move": 0, "turn": 0, "fireZap": 0}
FORWARD = {"move": 1, "turn": 0, "fireZap": 0}
STEP_RIGHT = {"move": 2, "turn": 0, "fireZap": 0}
BACKWARD = {"move": 3, "turn": 0, "fireZap": 0}
STEP_LEFT = {"move": 4, "turn": 0, "fireZap": 0}
TURN_LEFT = {"move": 0, "turn": -1, "fireZap": 0}
TURN_RIGHT = {"move": 0, "turn": 1, "fireZap": 0}
FIRE_ZAP = {"move": 0, "turn": 0, "fireZap": 1}
# pyformat: enable
# pylint: enable=bad-whitespace
ACTION_SET = (
NOOP,
FORWARD,
BACKWARD,
STEP_LEFT,
STEP_RIGHT,
TURN_LEFT,
TURN_RIGHT,
FIRE_ZAP,
)
# Remove the first entry from human_readable_colors after using it for the self
# color to prevent it from being used again as another avatar color.
light_desaturated_avatar_palette = list(
colors.light_desaturated_avatar_palette)
TARGET_SPRITE_SELF = {
"name": "Self",
"shape": shapes.CUTE_AVATAR,
"palette": shapes.get_palette(light_desaturated_avatar_palette.pop(0)),
"noRotate": True,
}
def create_prefabs() -> PrefabConfig:
"""Returns the prefabs.
Prefabs are a dictionary mapping names to template game objects that can
  be cloned and placed in multiple locations according to an ascii map.
"""
prefabs = {
"dirt": DIRT,
"spawn_point": SPAWN_POINT,
"red_mushroom": create_mushroom(
initial_state="fullInternalityZeroExternality"),
"green_mushroom": create_mushroom(
initial_state="halfInternalityHalfExternality"),
"blue_mushroom": create_mushroom(
initial_state="zeroInternalityFullExternality"),
"orange_mushroom": create_mushroom(
initial_state="negativeInternalityNegativeExternality"),
"potential_mushroom": create_mushroom(initial_state="wait"),
# fence prefabs
"nw_wall_corner": NW_WALL_CORNER,
"nw_inner_wall_corner": NW_INNER_WALL_CORNER,
"ne_wall_corner": NE_WALL_CORNER,
"ne_inner_wall_corner": NE_INNER_WALL_CORNER,
"se_wall_corner": SE_WALL_CORNER,
"sw_wall_corner": SW_WALL_CORNER,
"wall_north": WALL_NORTH,
"wall_east": WALL_EAST,
"wall_south": WALL_SOUTH,
"wall_west": WALL_WEST,
"wall_shadow_sw": WALL_SHADOW_SW,
"wall_shadow_s": WALL_SHADOW_S,
"wall_shadow_se": WALL_SHADOW_SE,
}
return prefabs
def create_scene():
"""Create the scene object, a non-physical object to hold global logic."""
scene = {
"name": "scene",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "scene",
"stateConfigs": [{
"state": "scene",
}],
}
},
{
"component": "Transform",
},
{
"component": "MushroomRegrowth",
"kwargs": {
"mushroomsToProbabilities": {
"fullInternalityZeroExternality": {
"fullInternalityZeroExternality": 0.25,
"halfInternalityHalfExternality": 0.0,
"zeroInternalityFullExternality": 0.0,
"negativeInternalityNegativeExternality": 0.0,
},
"halfInternalityHalfExternality": {
"fullInternalityZeroExternality": 0.25,
"halfInternalityHalfExternality": 0.4,
"zeroInternalityFullExternality": 0.0,
"negativeInternalityNegativeExternality": 0.0,
},
"zeroInternalityFullExternality": {
"fullInternalityZeroExternality": 0.25,
"halfInternalityHalfExternality": 0.4,
"zeroInternalityFullExternality": 0.6,
"negativeInternalityNegativeExternality": 0.0,
},
"negativeInternalityNegativeExternality": {
"fullInternalityZeroExternality": 0.0,
"halfInternalityHalfExternality": 0.0,
"zeroInternalityFullExternality": 0.0,
"negativeInternalityNegativeExternality": 1.0,
},
},
"minPotentialMushrooms": 1,
}
},
{
"component": "StochasticIntervalEpisodeEnding",
"kwargs": {
"minimumFramesPerEpisode": 1000,
"intervalLength": 100, # Set equal to unroll length.
"probabilityTerminationPerInterval": 0.2
}
},
]
}
return scene
def create_avatar_object(player_idx: int,
target_sprite_self: Dict[str, Any]) -> Dict[str, Any]:
"""Create an avatar object that always sees itself as blue."""
# Lua is 1-indexed.
lua_index = player_idx + 1
# Setup the self vs other sprite mapping.
source_sprite_self = "Avatar" + str(lua_index)
custom_sprite_map = {source_sprite_self: target_sprite_self["name"]}
live_state_name = "player{}".format(lua_index)
avatar_object = {
"name": f"avatar{lua_index}",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": live_state_name,
"stateConfigs": [
# Initial player state.
{"state": live_state_name,
"layer": "upperPhysical",
"sprite": source_sprite_self,
"contact": "avatar",
"groups": ["players"]},
# Player wait type for times when they are zapped out.
{"state": "playerWait",
"groups": ["playerWaits"]},
]
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": [source_sprite_self],
"spriteShapes": [shapes.CUTE_AVATAR],
"palettes": [
shapes.get_palette(
light_desaturated_avatar_palette[player_idx])
],
"noRotates": [True]
}
},
{
"component": "AdditionalSprites",
"kwargs": {
"renderMode": "ascii_shape",
"customSpriteNames": [target_sprite_self["name"]],
"customSpriteShapes": [target_sprite_self["shape"]],
"customPalettes": [target_sprite_self["palette"]],
"customNoRotates": [target_sprite_self["noRotate"]],
}
},
{
"component": "Avatar",
"kwargs": {
"index": lua_index,
"aliveState": live_state_name,
"waitState": "playerWait",
"spawnGroup": "spawnPoints",
"actionOrder": ["move",
"turn",
"fireZap"],
"actionSpec": {
"move": {"default": 0, "min": 0, "max": len(_COMPASS)},
"turn": {"default": 0, "min": -1, "max": 1},
"fireZap": {"default": 0, "min": 0, "max": 1},
},
"view": {
"left": 5,
"right": 5,
"forward": 9,
"backward": 1,
"centered": False
},
"spriteMap": custom_sprite_map,
}
},
{
"component": "Zapper",
"kwargs": {
"cooldownTime": 3,
"beamLength": 3,
"beamRadius": 1,
"framesTillRespawn": 50,
"penaltyForBeingZapped": 0,
"rewardForZapping": 0,
# GraduatedSanctionsMarking handles removal instead of Zapper.
"removeHitPlayer": False,
}
},
{
"component": "ReadyToShootObservation",
},
{
"component": "Cumulants",
},
{
"component": "LocationObserver",
"kwargs": {
"objectIsAvatar": True,
"alsoReportOrientation": True
}
},
{
"component": "AvatarMetricReporter",
"kwargs": {
"metrics": [
{
"name": "ATE_MUSHROOM_FIZE",
"type": "Doubles",
"shape": [],
"component": "Cumulants",
"variable": "ate_mushroom_fize",
},
{
"name": "ATE_MUSHROOM_HIHE",
"type": "Doubles",
"shape": [],
"component": "Cumulants",
"variable": "ate_mushroom_hihe",
},
{
"name": "ATE_MUSHROOM_ZIFE",
"type": "Doubles",
"shape": [],
"component": "Cumulants",
"variable": "ate_mushroom_zife",
},
{
"name": "ATE_MUSHROOM_NINE",
"type": "Doubles",
"shape": [],
"component": "Cumulants",
"variable": "ate_mushroom_nine",
},
{
"name": "DESTROYED_MUSHROOM_FIZE",
"type": "Doubles",
"shape": [],
"component": "Cumulants",
"variable": "destroyed_mushroom_fize",
},
{
"name": "DESTROYED_MUSHROOM_HIHE",
"type": "Doubles",
"shape": [],
"component": "Cumulants",
"variable": "destroyed_mushroom_hihe",
},
{
"name": "DESTROYED_MUSHROOM_ZIFE",
"type": "Doubles",
"shape": [],
"component": "Cumulants",
"variable": "destroyed_mushroom_zife",
},
{
"name": "DESTROYED_MUSHROOM_NINE",
"type": "Doubles",
"shape": [],
"component": "Cumulants",
"variable": "destroyed_mushroom_nine",
},
]
}
},
]
}
return avatar_object
def create_marking_overlay(player_idx: int) -> Mapping[str, Any]:
"""Create a graduated sanctions marking overlay object."""
# Lua is 1-indexed.
lua_idx = player_idx + 1
marking_object = {
"name": "avatar_marking",
"components": [
{
"component": "StateManager",
"kwargs": {
"initialState": "avatarMarkingWait",
"stateConfigs": [
# Declare one state per level of the hit logic.
{"state": "level_1",
"layer": "superOverlay",
"sprite": "sprite_for_level_1"},
{"state": "level_2",
"layer": "superOverlay",
"sprite": "sprite_for_level_2"},
# Invisible inactive (zapped out) overlay type.
{"state": "avatarMarkingWait",
"groups": ["avatarMarkingWaits"]},
]
}
},
{
"component": "Transform",
},
{
"component": "Appearance",
"kwargs": {
"renderMode": "ascii_shape",
"spriteNames": ["sprite_for_level_1",
"sprite_for_level_2"],
"spriteShapes": [MARKING_SPRITE,
MARKING_SPRITE],
"palettes": [get_marking_palette(0.0),
get_marking_palette(1.0)],
"noRotates": [True] * 3
}
},
{
"component": "GraduatedSanctionsMarking",
"kwargs": {
"playerIndex": lua_idx,
"waitState": "avatarMarkingWait",
"hitName": "zapHit",
"recoveryTime": 50,
"hitLogic": [
{"levelIncrement": 1,
"sourceReward": 0,
"targetReward": 0,
"freeze": 25},
{"levelIncrement": -1,
"sourceReward": 0,
"targetReward": 0,
"remove": True}
],
}
},
]
}
return marking_object
def create_avatar_objects(num_players):
"""Returns list of avatar objects of length 'num_players'."""
avatar_objects = []
for player_idx in range(0, num_players):
game_object = create_avatar_object(player_idx,
TARGET_SPRITE_SELF)
avatar_objects.append(game_object)
marking_object = create_marking_overlay(player_idx)
avatar_objects.append(marking_object)
return avatar_objects
def get_config():
"""Default configuration for this substrate."""
config = config_dict.ConfigDict()
# Action set configuration.
config.action_set = ACTION_SET
# Observation format configuration.
config.individual_observation_names = [
"RGB",
"READY_TO_SHOOT",
# Debug only (do not use the following observations in policies).
"POSITION",
"ORIENTATION",
]
config.global_observation_names = [
"WORLD.RGB",
]
# The specs of the environment (from a single-agent perspective).
config.action_spec = specs.action(len(ACTION_SET))
# The roles assigned to each player.
config.valid_roles = frozenset({"default"})
return config
def build(
roles: Sequence[str],
config: config_dict.ConfigDict,
) -> Mapping[str, Any]:
"""Build substrate definition given player roles."""
num_players = len(roles)
# Build the rest of the substrate definition.
substrate_definition = dict(
levelName="externality_mushrooms",
levelDirectory="meltingpot/lua/levels",
numPlayers=num_players,
# Define upper bound of episode length since episodes end stochastically.
maxEpisodeLengthFrames=5000,
spriteSize=8,
topology="BOUNDED", # Choose from ["BOUNDED", "TORUS"],
simulation={
"map": config.layout.ascii_map,
"gameObjects": create_avatar_objects(num_players),
"scene": create_scene(),
"prefabs": create_prefabs(),
"charPrefabMap": config.layout.char_prefab_map,
},
)
return substrate_definition
|
{
"content_hash": "4e11b949fb16eb16abcc7ee814c8afa5",
"timestamp": "",
"source": "github",
"line_count": 1064,
"max_line_length": 80,
"avg_line_length": 33.99812030075188,
"alnum_prop": 0.45184386575993807,
"repo_name": "deepmind/meltingpot",
"id": "20ec5465d27a05e9bc8c1602c0e1813cf227cfe3",
"size": "36769",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "meltingpot/python/configs/substrates/externality_mushrooms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1732"
},
{
"name": "Lua",
"bytes": "674594"
},
{
"name": "Python",
"bytes": "1768669"
},
{
"name": "Shell",
"bytes": "2923"
}
],
"symlink_target": ""
}
|
"""Automatically detects if file resides in a ruby on rails application and set the properly language."""
import gedit, os
class RubyOnRailsLoader(gedit.Plugin):
"""Automatically strip all trailing whitespace before saving."""
def activate(self, window):
"""Activate plugin."""
self.window = window
handler_id = window.connect("tab-added", self.on_window_tab_added)
window.set_data(self.__class__.__name__, handler_id)
for doc in window.get_documents():
self.connect_document(doc)
def connect_document(self, doc):
"""Connect to document's 'load' signal."""
handler_id = doc.connect("loaded", self.on_document_load)
doc.set_data(self.__class__.__name__, handler_id)
def deactivate(self, window):
"""Deactivate plugin."""
name = self.__class__.__name__
handler_id = window.get_data(name)
window.disconnect(handler_id)
window.set_data(name, None)
def on_window_tab_added(self, window, tab):
"""Connect the document in tab."""
doc = tab.get_document()
self.connect_document(doc)
def on_document_load(self, doc, *args):
language = doc.get_language()
if language:
lang = language.get_id()
if lang == 'ruby':
uri = doc.get_uri_for_display()
if self.get_in_rails(uri):
lang = gedit.get_language_manager().get_language('rubyonrails')
doc.set_language(lang)
                    # Ugly workaround to trigger update_ui by toggling the
                    # view's editability.
view = gedit.tab_get_from_document(doc).get_view()
editable = view.get_editable()
view.set_editable(not editable)
view.set_editable(editable)
def get_in_rails(self, uri):
rails_root = self.get_data('RailsLoaderRoot')
if rails_root:
return rails_root
base_dir = os.path.dirname(uri)
depth = 10
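        # Walk up at most ten parent directories looking for the usual Rails
        # markers checked below: an app/ directory, a config/ directory and a
        # config/environment.rb file.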
while depth > 0:
depth -= 1
app_dir = os.path.join(base_dir, 'app')
config_dir = os.path.join(base_dir, 'config')
environment_file = os.path.join(base_dir, 'config', 'environment.rb')
if os.path.isdir(app_dir) and os.path.isdir(config_dir) and os.path.isfile(environment_file):
rails_root = base_dir
break
else:
base_dir = os.path.abspath(os.path.join(base_dir, '..'))
if rails_root:
self.set_data('RailsLoaderRoot', rails_root)
return True
return False
def set_data(self, name, value):
self.window.get_active_tab().get_view().set_data(name, value)
def get_data(self, name):
return self.window.get_active_tab().get_view().get_data(name)
|
{
"content_hash": "332efe8b381a69bc1a3c620072abaaba",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 105,
"avg_line_length": 33.95238095238095,
"alnum_prop": 0.5669705469845723,
"repo_name": "icebreaker/dotfiles",
"id": "e9c6342df9f734c4c45048441b0b59cbcf612617",
"size": "3599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gnome/gnome2/gedit/plugins.symlink/rubyonrailsloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6086"
},
{
"name": "C++",
"bytes": "2460"
},
{
"name": "CSS",
"bytes": "336717"
},
{
"name": "HTML",
"bytes": "3559"
},
{
"name": "Lua",
"bytes": "1143"
},
{
"name": "Perl",
"bytes": "2966"
},
{
"name": "Python",
"bytes": "884750"
},
{
"name": "Ruby",
"bytes": "50360"
},
{
"name": "Shell",
"bytes": "42835"
},
{
"name": "Vim Script",
"bytes": "3955400"
},
{
"name": "Zig",
"bytes": "1179"
}
],
"symlink_target": ""
}
|
"""
This module provides the base sparse command and a load hook for dynamically
adding other subcommands. The "load_subparsers" function searches for modules
in the streamparse/cli directory that have a "subparser_hook" method. The
"subparser_hook" accepts the sparse subparsers object and adds its
subparser as needed.
"""
import argparse
import importlib
import os
import pkgutil
import sys
from ..util import die
from ..version import __version__
def load_subparsers(subparsers):
"""
    Searches modules in streamparse/cli for a 'subparser_hook' method and calls
the 'subparser_hook' method on the sparse subparsers object.
"""
for _, mod_name, is_pkg in pkgutil.iter_modules([os.path.dirname(__file__)]):
if not is_pkg and mod_name not in sys.modules:
module = importlib.import_module(f"streamparse.cli.{mod_name}")
# check for the subparser hook
if hasattr(module, "subparser_hook"):
module.subparser_hook(subparsers)
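def _example_subparser_hook(subparsers):
    """Hypothetical sketch of the 'subparser_hook' contract described above.

    Real hooks live in other modules under streamparse/cli; the leading
    underscore keeps this example from being discovered as a command.
    """
    parser = subparsers.add_parser("frobnicate", help="Example command.")
    parser.add_argument("--dry-run", action="store_true")
    parser.set_defaults(func=lambda args: None)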
def main():
"""main entry point for sparse"""
parser = argparse.ArgumentParser(
description="Utilities for managing Storm" "/streamparse topologies.",
epilog="sparse provides a front-end to "
"streamparse, a framework for "
"creating Python projects for "
"running, debugging, and "
"submitting computation topologies "
"against real-time streams, using "
"Apache Storm. It requires java and"
" lein (Clojure build tool) to be "
"on your $PATH, and uses lein and "
"Clojure under the hood for JVM/"
"Thrift interop.",
)
subparsers = parser.add_subparsers(title="sub-commands")
parser.add_argument(
"--version", action="version", version=f"%(prog)s {__version__}"
)
load_subparsers(subparsers)
def _help_command(args):
"""Print help information about other commands.
Does the same thing as adding --help flag to sub-command calls.
"""
subparsers.choices[args.sub_command].print_help()
sys.exit(1)
help_parser = subparsers.add_parser(
"help",
description=_help_command.__doc__,
help=_help_command.__doc__.splitlines()[0],
)
help_parser.add_argument(
"sub_command",
help="The command to provide help for.",
choices=sorted(subparsers.choices.keys()),
)
help_parser.set_defaults(func=_help_command)
args = parser.parse_args()
if os.getuid() == 0 and not os.getenv("LEIN_ROOT"):
die(
"Because streamparse relies on Leiningen, you cannot run "
"streamparse as root without the LEIN_ROOT environment variable "
"set. Otherwise, Leiningen would hang indefinitely under-the-hood "
"waiting for user input."
)
# http://grokbase.com/t/python/python-bugs-list/12arsq9ayf/issue16308-undocumented-behaviour-change-in-argparse-from-3-2-3-to-3-3-0
if hasattr(args, "func"):
args.func(args)
# python3.3+ argparse changes
else:
parser.print_help()
sys.exit(1)
if __name__ == "__main__":
main()
|
{
"content_hash": "656df382b4231878dff2a7d6c968095e",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 135,
"avg_line_length": 33.89247311827957,
"alnum_prop": 0.6424492385786802,
"repo_name": "Parsely/streamparse",
"id": "a99c1d6f2c4401a389d71cdc4f8aebbc9c42e444",
"size": "3152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streamparse/cli/sparse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "517"
},
{
"name": "Python",
"bytes": "211941"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
}
|
"""Misc file tests.
Made for Jython.
"""
from __future__ import with_statement
import os
import unittest
from test import test_support
class FileTestCase(unittest.TestCase):
def tearDown(self):
if os.path.exists(test_support.TESTFN):
os.remove(test_support.TESTFN)
def test_append(self):
# http://bugs.jython.org/issue1466
mode = 'ab'
fp1 = open(test_support.TESTFN, mode)
fp1.write('test1\n')
fp2 = open(test_support.TESTFN, mode)
fp2.write('test2\n')
fp1.close()
fp2.close()
with open(test_support.TESTFN) as fp:
self.assertEqual('test1\ntest2\n', fp.read())
def test_appendplus(self):
# regression with the test_append fix:
# http://bugs.jython.org/issue1576
with open(test_support.TESTFN, 'ab+') as fp:
fp.write('test1\n')
fp.seek(0)
self.assertEqual(fp.read(), 'test1\n')
def test_issue1825(self):
testfnu = unicode(test_support.TESTFN)
try:
open(testfnu)
except IOError, e:
self.assertTrue(isinstance(e.filename, unicode))
self.assertEqual(e.filename, testfnu)
else:
            self.fail('IOError was not raised')
def test_main():
test_support.run_unittest(FileTestCase)
if __name__ == '__main__':
test_main()
|
{
"content_hash": "b2f42a5157f6595ae32a96e263864b08",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 60,
"avg_line_length": 26.346153846153847,
"alnum_prop": 0.5897810218978102,
"repo_name": "ofermend/medicare-demo",
"id": "da5412297508b2562ea7fe311be78293d2727e9b",
"size": "1370",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "socialite/jython/Lib/test/test_file_jy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "35489"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "Java",
"bytes": "2267927"
},
{
"name": "PigLatin",
"bytes": "5097"
},
{
"name": "Python",
"bytes": "10846103"
},
{
"name": "R",
"bytes": "752"
},
{
"name": "Shell",
"bytes": "30621"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import mock
from django.test import TestCase
from django.views.generic import View
from django_universal_view_decorator import ViewDecoratorBase
def test_log(*args, **kwargs):
pass
# Test views
def regular_view_function(request, *args, **kwargs):
test_log(regular_view_function, request, *args, **kwargs)
return 'response'
# Test decorators
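# Each decorator below overrides _call_view_function, the hook that
# ViewDecoratorBase invokes in place of the wrapped view; returning without
# calling view_function short-circuits the request.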
class MyViewDecorator(ViewDecoratorBase):
def _call_view_function(self, decoration_instance, view_class_instance, view_function, *args, **kwargs):
test_log(MyViewDecorator)
return view_function(*args, **kwargs)
class MyViewDecoratorWithArg(ViewDecoratorBase):
def __init__(self, *args, **kwargs):
super(MyViewDecoratorWithArg, self).__init__()
self.args = args
self.kwargs = kwargs
def _call_view_function(self, decoration_instance, view_class_instance, view_function, *args, **kwargs):
test_log(MyViewDecoratorWithArg, *self.args, **self.kwargs)
args += self.args
kwargs.update(self.kwargs)
return view_function(*args, **kwargs)
class MyViewDecoratorThatDoesntCallTheView(ViewDecoratorBase):
def _call_view_function(self, decoration_instance, view_class_instance, view_function, *args, **kwargs):
test_log(MyViewDecoratorThatDoesntCallTheView)
return 'decorator_response'
# Tests
@mock.patch(__name__ + '.test_log', wraps=test_log)
class TestDecoratingViewFunctionAndViewMethodWithoutUniversalWrapper(TestCase):
def test_regular_view_function_without_decorator_args(self, mock_test_log):
decorated = MyViewDecorator()(regular_view_function)
response = decorated('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecorator),
mock.call(regular_view_function, 'request'),
])
def test_regular_view_function_with_decorator_args(self, mock_test_log):
decorated = MyViewDecoratorWithArg(42)(regular_view_function)
response = decorated('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, 42),
mock.call(regular_view_function, 'request', 42),
])
def test_view_class_method_without_decorator_args(self, mock_test_log):
class ViewClass(View):
@MyViewDecorator()
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', request, *args, **kwargs)
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecorator),
mock.call('dispatch', 'request'),
])
def test_view_class_method_with_decorator_args(self, mock_test_log):
class ViewClass(View):
@MyViewDecoratorWithArg(42)
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', request, *args, **kwargs)
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, 42),
mock.call('dispatch', 'request', 42),
])
@mock.patch(__name__ + '.test_log', wraps=test_log)
class TestDecoratingViewsWithUniversalWrapper(TestCase):
def test_regular_view_function(self, mock_test_log):
decorated = MyViewDecoratorWithArg.universal_decorator(42)(regular_view_function)
response = decorated('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, 42),
mock.call(regular_view_function, 'request', 42),
])
def test_regular_view_function_with_decorator_that_doesnt_call_the_view(self, mock_test_log):
decorated = MyViewDecoratorThatDoesntCallTheView.universal_decorator(regular_view_function)
response = decorated('request')
self.assertEqual(response, 'decorator_response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorThatDoesntCallTheView),
])
def test_view_class_method(self, mock_test_log):
class ViewClass(View):
@MyViewDecoratorWithArg.universal_decorator(42)
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', request, *args, **kwargs)
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, 42),
mock.call('dispatch', 'request', 42),
])
def test_view_class_method_with_decorator_that_doesnt_call_the_view(self, mock_test_log):
class ViewClass(View):
@MyViewDecoratorThatDoesntCallTheView.universal_decorator
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', request, *args, **kwargs)
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'decorator_response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorThatDoesntCallTheView),
])
def test_view_class(self, mock_test_log):
@MyViewDecoratorWithArg.universal_decorator(42)
class ViewClass(View):
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', request, *args, **kwargs)
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, 42),
mock.call('dispatch', 'request', 42),
])
def test_view_class_with_decorator_that_doesnt_call_the_view(self, mock_test_log):
@MyViewDecoratorThatDoesntCallTheView.universal_decorator
class ViewClass(View):
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', request, *args, **kwargs)
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'decorator_response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorThatDoesntCallTheView),
])
@mock.patch(__name__ + '.test_log', wraps=test_log)
class TestStackedDecoration(TestCase):
def test_regular_view_function_with_stacked_decorators(self, mock_test_log):
@MyViewDecoratorWithArg(arg0=40)
@MyViewDecoratorWithArg(arg1=41)
@MyViewDecoratorWithArg(arg2=42)
def view_function(request, *args, **kwargs):
test_log('view_function', request, *args, **kwargs)
return 'response'
response = view_function('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, arg0=40),
mock.call(MyViewDecoratorWithArg, arg1=41),
mock.call(MyViewDecoratorWithArg, arg2=42),
mock.call('view_function', 'request', arg0=40, arg1=41, arg2=42),
])
def test_view_class_method_with_stacked_decorators(self, mock_test_log):
class ViewClass(View):
@MyViewDecoratorWithArg(arg0=40)
@MyViewDecoratorWithArg(arg1=41)
@MyViewDecoratorWithArg(arg2=42)
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', request, *args, **kwargs)
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, arg0=40),
mock.call(MyViewDecoratorWithArg, arg1=41),
mock.call(MyViewDecoratorWithArg, arg2=42),
mock.call('dispatch', 'request', arg0=40, arg1=41, arg2=42),
])
def test_regular_view_function_with_stacked_universal_decorators(self, mock_test_log):
@MyViewDecoratorWithArg.universal_decorator(arg0=40)
@MyViewDecoratorWithArg.universal_decorator(arg1=41)
@MyViewDecoratorWithArg.universal_decorator(arg2=42)
def view_function(request, *args, **kwargs):
test_log('view_function', request, *args, **kwargs)
return 'response'
response = view_function('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, arg0=40),
mock.call(MyViewDecoratorWithArg, arg1=41),
mock.call(MyViewDecoratorWithArg, arg2=42),
mock.call('view_function', 'request', arg0=40, arg1=41, arg2=42),
])
def test_view_class_method_with_stacked_universal_decorators(self, mock_test_log):
class ViewClass(View):
@MyViewDecoratorWithArg.universal_decorator(arg0=40)
@MyViewDecoratorWithArg.universal_decorator(arg1=41)
@MyViewDecoratorWithArg.universal_decorator(arg2=42)
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', request, *args, **kwargs)
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, arg0=40),
mock.call(MyViewDecoratorWithArg, arg1=41),
mock.call(MyViewDecoratorWithArg, arg2=42),
mock.call('dispatch', 'request', arg0=40, arg1=41, arg2=42),
])
def test_view_class_with_stacked_universal_decorators(self, mock_test_log):
@MyViewDecoratorWithArg.universal_decorator(arg0=40)
@MyViewDecoratorWithArg.universal_decorator(arg1=41)
@MyViewDecoratorWithArg.universal_decorator(arg2=42)
class ViewClass(View):
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', request, *args, **kwargs)
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, arg0=40),
mock.call(MyViewDecoratorWithArg, arg1=41),
mock.call(MyViewDecoratorWithArg, arg2=42),
mock.call('dispatch', 'request', arg0=40, arg1=41, arg2=42),
])
def test_regular_view_function_with_stacked_and_mixed_universal_and_non_universal_decorators(self, mock_test_log):
@MyViewDecoratorWithArg(arg0=40)
@MyViewDecoratorWithArg.universal_decorator(arg1=41)
@MyViewDecoratorWithArg(arg2=42)
def view_function(request, *args, **kwargs):
test_log('view_function', request, *args, **kwargs)
return 'response'
response = view_function('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, arg0=40),
mock.call(MyViewDecoratorWithArg, arg1=41),
mock.call(MyViewDecoratorWithArg, arg2=42),
mock.call('view_function', 'request', arg0=40, arg1=41, arg2=42),
])
def test_view_class_method_with_stacked_and_mixed_universal_and_non_universal_decorators(self, mock_test_log):
class ViewClass(View):
@MyViewDecoratorWithArg(arg0=40)
@MyViewDecoratorWithArg.universal_decorator(arg1=41)
@MyViewDecoratorWithArg(arg2=42)
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', request, *args, **kwargs)
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call(MyViewDecoratorWithArg, arg0=40),
mock.call(MyViewDecoratorWithArg, arg1=41),
mock.call(MyViewDecoratorWithArg, arg2=42),
mock.call('dispatch', 'request', arg0=40, arg1=41, arg2=42),
])
class TestInternals(TestCase):
@mock.patch(__name__ + '.test_log', wraps=test_log)
def test_default_call_view_function_implementation_calls_the_wrapped_view(self, mock_test_log):
class MyDecorator(ViewDecoratorBase):
def _call_view_function(self, decoration_instance, view_class_instance, view_function, *args, **kwargs):
test_log('decorator', 'testing_default_call_view_function_implementation')
return super(MyDecorator, self)._call_view_function(
decoration_instance, view_class_instance, view_function, *args, **kwargs)
@MyDecorator.universal_decorator
def view_function(request):
test_log('view_function', request)
return 'response'
response = view_function('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator', 'testing_default_call_view_function_implementation'),
mock.call('view_function', 'request'),
])
|
{
"content_hash": "1d73ef2b36154f8c623faa81840010d8",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 118,
"avg_line_length": 42.6551724137931,
"alnum_prop": 0.6405526567208054,
"repo_name": "pasztorpisti/django-universal-view-decorator",
"id": "5916ac2706e4a553cae51d33b48ca9ab79949d7a",
"size": "13607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_view_decorator_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131370"
}
],
"symlink_target": ""
}
|
'''Klassz étterem page subclass module.'''
import datetime
import re
import dateutil.parser # dateutils
import page # FoodAId
class KlasszEtterem(page.Page):
'''Page handling for this restaurant.'''
def __init__(self, scraper):
'''Constructor.'''
super(KlasszEtterem, self).__init__(scraper, "klasszetterem", 6)
@classmethod
def _find_end_position(cls, message, date, start_position):
'''Finds the end position of the daily menu.'''
index = message.find(
cls.get_hungarian_weekday((date + datetime.timedelta(days=1)).strftime("%A")),
start_position)
if index == -1:
index = message.find('#', start_position)
return (index
if index != -1
else len(message))
@classmethod
def _find_start_position(cls, message, date):
'''Finds the start position of the daily menu.'''
weekday_hun = cls.get_hungarian_weekday(date.strftime("%A"))
return message.find(weekday_hun) + len(weekday_hun) + 1
def get_menu(self, local_time, posts):
'''Retrieves the menu of the page for the specified date from the messages provided.'''
weekday_hun = self.get_hungarian_weekday(local_time.strftime("%A"))
        previous_weekday_hun = self.get_hungarian_weekday((local_time - datetime.timedelta(days=1)).strftime("%A"))
        next_weekday_hun = self.get_hungarian_weekday((local_time + datetime.timedelta(days=1)).strftime("%A"))
menu = '*' + self.name + ':*\n'
was_found = False
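        # A post is treated as the weekly menu when it is recent enough and
        # mentions the target weekday together with an adjacent weekday.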
for post in posts:
if 'message' not in post:
continue
creation_time_utc = dateutil.parser.parse(post['created_time'])
creation_time = self._get_local_time(creation_time_utc, local_time.tzinfo)
if ((local_time.date() - creation_time.date()).days < self.update_interval and
weekday_hun in post['message'] and
(previous_weekday_hun in post['message'] or
next_weekday_hun in post['message'])):
start_position = self._find_start_position(post['message'], local_time)
if start_position == -1:
continue
end_position = self._find_end_position(post['message'], local_time, start_position)
                menu += post['message'][start_position:end_position].strip()
was_found = True
break
if was_found:
menu = ('\n> '.join(menu.split('\n')))
return menu
else:
return ''
|
{
"content_hash": "02a0abbbb75516211e138d934e653e6d",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 115,
"avg_line_length": 34.38961038961039,
"alnum_prop": 0.573262839879154,
"repo_name": "Pregnor/foodAId",
"id": "77f74517eb94ad0079c291a24040106ca4f2c527",
"size": "2673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "klasszetterem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32905"
}
],
"symlink_target": ""
}
|
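A standalone illustration of the windowing logic KlasszEtterem relies on above (the message and weekday names are invented for the demo): the daily menu is the slice between today's Hungarian weekday name and the next day's, falling back to '#' or the end of the message.

message = 'Hetfo: leves es palacsinta Kedd: gulyas'
weekday, next_weekday = 'Hetfo', 'Kedd'
start = message.find(weekday) + len(weekday) + 1  # skip the name plus one separator char
end = message.find(next_weekday, start)
if end == -1:
    end = message.find('#', start)
print(message[start:end if end != -1 else len(message)].strip())  # leves es palacsinta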
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import io
import tokenize
NON_CODE_TOKENS = frozenset((
tokenize.COMMENT, tokenize.ENDMARKER, tokenize.NEWLINE, tokenize.NL,
))
def check_docstring_first(src, filename='<unknown>'):
"""Returns nonzero if the source has what looks like a docstring that is
not at the beginning of the source.
A string will be considered a docstring if it is a STRING token with a
col offset of 0.
"""
found_docstring_line = None
found_code_line = None
tok_gen = tokenize.generate_tokens(io.StringIO(src).readline)
for tok_type, _, (sline, scol), _, _ in tok_gen:
# Looks like a docstring!
if tok_type == tokenize.STRING and scol == 0:
if found_docstring_line is not None:
print(
'{0}:{1} Multiple module docstrings '
'(first docstring on line {2}).'.format(
filename, sline, found_docstring_line,
)
)
return 1
elif found_code_line is not None:
print(
'{0}:{1} Module docstring appears after code '
'(code seen on line {2}).'.format(
filename, sline, found_code_line,
)
)
return 1
else:
found_docstring_line = sline
elif tok_type not in NON_CODE_TOKENS and found_code_line is None:
found_code_line = sline
return 0
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
args = parser.parse_args(argv)
retv = 0
for filename in args.filenames:
        with io.open(filename) as f:
            contents = f.read()
retv |= check_docstring_first(contents, filename=filename)
return retv
|
{
"content_hash": "8f5c61711dffd2ae10cd105fcae1b758",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 76,
"avg_line_length": 30.46031746031746,
"alnum_prop": 0.5685252735799896,
"repo_name": "Coverfox/pre-commit-hooks",
"id": "da5425d2cb60a37e504a9655578f635317bb0239",
"size": "1919",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pre_commit_hooks/check_docstring_first.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "375"
},
{
"name": "Python",
"bytes": "70023"
}
],
"symlink_target": ""
}
|
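A quick usage sketch for the hook above, assuming check_docstring_first is in scope (e.g. imported from pre_commit_hooks.check_docstring_first); it returns 0 when docstring placement is fine and 1 (plus a printed diagnostic) otherwise.

print(check_docstring_first('"""Docstring first."""\nx = 1\n'))       # 0
print(check_docstring_first('x = 1\n"""Docstring after code."""\n'))  # 1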
from django.test import TestCase
import pytest
from mittab.apps.tab.models import Debater, Team
from mittab.libs.tests.assertion import assert_nearly_equal
from mittab.libs.tests.data.load_data import load_debater_rankings
from mittab.libs.tests.data.load_data import load_team_rankings
from mittab.libs.tab_logic.rankings import TeamScore, DebaterScore
@pytest.mark.django_db
class TestRankingLogic(TestCase):
"""Tests that the methods related to debater and team scoring work as
expected"""
fixtures = ["testing_finished_db"]
pytestmark = pytest.mark.django_db
def test_debater_score(self):
""" Comprehensive test of ranking calculations, done on real world
data that has real world problems (e.g. teams not paired in, ironmen,
etc ...)
"""
debaters = Debater.objects.order_by("pk")
actual_scores = [(debater.name,
DebaterScore(debater).scoring_tuple()[:6])
for debater in debaters]
actual_scores = dict(actual_scores)
expected_scores = dict(load_debater_rankings())
assert len(expected_scores) == len(actual_scores)
for name, actual_score in actual_scores.items():
left, right = actual_score, expected_scores[name]
msg = "{} - actual: {}, expected {}".format(name, left, right)
            for pair in zip(left, right):
                assert_nearly_equal(*pair, message=msg)
def test_team_score(self):
""" Comprehensive test of team scoring calculations, done on real
world data that has real world inaccuracies """
teams = Team.objects.order_by("pk")
actual_scores = [(team.name, TeamScore(team).scoring_tuple()[:8])
for team in teams]
actual_scores = dict(actual_scores)
expected_scores = dict(load_team_rankings())
assert len(actual_scores) == len(expected_scores)
for team_name, actual_score in actual_scores.items():
left, right = actual_score, expected_scores[team_name]
msg = "{} - actual: {}, expected: {}".format(
team_name, left, right)
            for pair in zip(left, right):
                assert_nearly_equal(*pair, message=msg)
|
{
"content_hash": "0d1a7eb4ffbfa31ef2e1b088e0ba2331",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 42.27272727272727,
"alnum_prop": 0.6163440860215054,
"repo_name": "jolynch/mit-tab",
"id": "73be0a49948dfc67964cd6beea11e95c120e1b9a",
"size": "2325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mittab/libs/tests/tab_logic/test_ranking_logic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17375"
},
{
"name": "HTML",
"bytes": "59858"
},
{
"name": "JavaScript",
"bytes": "13569"
},
{
"name": "Makefile",
"bytes": "344"
},
{
"name": "Python",
"bytes": "262840"
},
{
"name": "Shell",
"bytes": "1469"
}
],
"symlink_target": ""
}
|
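assert_nearly_equal is imported from mittab's test helpers and not shown in this entry; a minimal sketch of what such a helper typically looks like (an assumption, not mittab's actual implementation):

def assert_nearly_equal(left, right, message=None, tolerance=1e-6):
    # Fail with a readable message when two floats differ by more than the tolerance.
    assert abs(left - right) <= tolerance, message or '{} != {}'.format(left, right)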
import json
import logging
import sys
from os.path import dirname, join
# The working path for the Azure Function doesn't include this file's folder
sys.path.append(dirname(dirname(__file__)))
from c7n_azure import handler, entry
try:
import azure.functions as func
from azure.functions_worker.bindings.http import HttpRequest
except ImportError:
pass
def main(input):
logging.info("Running Azure Cloud Custodian Policy")
context = {
'config_file': join(dirname(__file__), 'config.json'),
'auth_file': join(dirname(__file__), 'auth.json')
}
event = None
    if isinstance(input, HttpRequest):
event = input.get_json()
logging.info(event)
# handshake with event grid subscription creation
if 'data' in event[0] and 'validationCode' in event[0]['data']:
code = event[0]['data']['validationCode']
response = {
"validationResponse": code
}
return func.HttpResponse(body=json.dumps(response, indent=2), status_code=200)
handler.run(event, context)
    if isinstance(input, HttpRequest):
return func.HttpResponse("OK")
# Need to manually initialize c7n_azure
entry.initialize_azure()
# flake8: noqa
|
{
"content_hash": "2e621b5d44c135a904a319dcbdce3ad7",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 90,
"avg_line_length": 26.020833333333332,
"alnum_prop": 0.6533226581265013,
"repo_name": "taohungyang/cloud-custodian",
"id": "f08b50ea400ade0d6ea2a24da1b2ae4c189e01bd",
"size": "1835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/c7n_azure/function.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "517"
},
{
"name": "Go",
"bytes": "131325"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "10146"
},
{
"name": "Python",
"bytes": "3444793"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
}
|
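For reference, the subscription-validation handshake handled above is triggered by an Event Grid payload shaped roughly like this hand-written sample; the function answers by echoing validationCode back as validationResponse.

sample_event = [{
    'eventType': 'Microsoft.EventGrid.SubscriptionValidationEvent',
    'data': {'validationCode': 'EXAMPLE-VALIDATION-CODE'},
}]
code = sample_event[0]['data']['validationCode']
expected_response = {'validationResponse': code}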
"""
Gauged - https://github.com/chriso/gauged
Copyright 2014 (c) Chris O'Hara <cohara87@gmail.com>
"""
from random import random
from math import floor, ceil
from time import time
from calendar import timegm
from datetime import datetime, timedelta
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from gauged import Gauged
def abbreviate(suffixes, cutoff):
def abbreviate_(number, decimals=1):
position = 0
while position < len(suffixes) and abs(number) >= cutoff:
number = round(number / cutoff, decimals)
position += 1
if floor(number) == ceil(number):
number = int(number)
return str(number) + suffixes[position]
return abbreviate_
abbreviate_number = abbreviate(['', 'K', 'M', 'B'], 1000)
abbreviate_bytes = abbreviate(['B', 'KB', 'MB', 'GB', 'TB'], 1024)
# Parse CLI options
benchmark = ArgumentParser(usage='%(prog)s [OPTIONS]',
formatter_class=ArgumentDefaultsHelpFormatter)
benchmark.add_argument(
'-n', '--number', type=int, default=1000000,
help='How many measurements to store')
benchmark.add_argument(
'-t', '--days', type=int, default=365,
help='How many days to spread the measurements over')
benchmark.add_argument(
'-d', '--driver', default='sqlite://',
help='Where to store the data (defaults to SQLite in-memory)')
benchmark.add_argument(
'-b', '--block-size', type=int, default=Gauged.DAY,
help='The block size to use')
benchmark.add_argument(
'-r', '--resolution', type=int, default=Gauged.SECOND,
help='The resolution to use')
options = vars(benchmark.parse_args())
# Setup the Gauged instance
gauged = Gauged(options['driver'], block_size=options['block_size'],
resolution=options['resolution'], key_overflow=Gauged.IGNORE,
gauge_nan=Gauged.IGNORE)
gauged.sync()
print('Writing to %s (block_size=%s, resolution=%s)' %
      (options['driver'], options['block_size'], options['resolution']))
# Get the start and end timestamp
end = datetime.now()
start = end - timedelta(days=options['days'])
start_timestamp = timegm(start.timetuple())
end_timestamp = timegm(end.timetuple())
number = abbreviate_number(options['number'])
print('Spreading %s measurements to key "foobar" over %s days' %
      (number, options['days']))
# Benchmark writes
measurements = options['number']
span = end_timestamp - start_timestamp
start = time()
with gauged.writer as writer:
data = ['foobar', 0]
gauges = [data]
add = writer.add
    for timestamp in range(start_timestamp, end_timestamp,
                           span // measurements):
data[1] = random()
add(gauges, timestamp=timestamp*1000)
elapsed = time() - start
print('Wrote %s measurements in %s seconds (%s/s)' %
      (number, round(elapsed, 3), abbreviate_number(measurements / elapsed)))
statistics = gauged.statistics()
byte_count = statistics.byte_count
print('Gauge data uses %s (%s per measurement)' %
      (abbreviate_bytes(byte_count),
       abbreviate_bytes(byte_count / float(measurements))))
# Read benchmarks
for aggregate in ('min', 'max', 'sum', 'count', 'mean', 'stddev', 'median'):
start = time()
gauged.aggregate('foobar', aggregate)
elapsed = time() - start
    print('%s() in %ss (read %s measurements/s)' %
          (aggregate, round(elapsed, 3),
           abbreviate_number(measurements / elapsed)))
|
{
"content_hash": "1cb3de80c21a4fee5b842299cf19b1c4",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 77,
"avg_line_length": 34.64646464646464,
"alnum_prop": 0.6626822157434402,
"repo_name": "chriso/gauged",
"id": "0b72a52cf795191aa08ac8035144125002a0f981",
"size": "3430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "60061"
},
{
"name": "Makefile",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "190561"
}
],
"symlink_target": ""
}
|
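A quick sanity check of the abbreviate() helpers defined above (assuming they are in scope): each divides by its cutoff, rounding to one decimal, until the value drops below the cutoff, then appends the matching suffix.

print(abbreviate_number(1234567))  # 1.2M
print(abbreviate_bytes(3430))      # 3.3KB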
"""Tests for layer graphs construction & handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras import combinations
from tensorflow.python.keras import initializers
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import models
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import functional
from tensorflow.python.keras.engine import input_layer as input_layer_lib
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training as training_lib
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.training.tracking.util import Checkpoint
try:
import yaml # pylint:disable=g-import-not-at-top
except ImportError:
yaml = None
class NetworkConstructionTest(keras_parameterized.TestCase):
def test_default_model_name(self):
inputs = input_layer_lib.Input(shape=(1,))
outputs = layers.Dense(1, activation='relu')(inputs)
model = training_lib.Model(inputs=inputs, outputs=outputs)
self.assertEqual(model.name, 'model')
model_2 = training_lib.Model(inputs=inputs, outputs=outputs)
self.assertEqual(model_2.name, 'model_1')
model_3 = training_lib.Model(inputs=inputs, outputs=outputs)
self.assertEqual(model_3.name, 'model_2')
def test_get_updates(self):
class MyLayer(layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(1, 1),
'float32',
trainable=False)
self.b = self.add_variable('b',
(1, 1),
'float32',
trainable=False)
self.add_update(state_ops.assign_add(self.a, [[1.]],
name='unconditional_update'))
self.built = True
def call(self, inputs):
self.add_update(state_ops.assign_add(self.b, inputs,
name='conditional_update'),
inputs=True)
return inputs + 1
with ops.Graph().as_default():
x1 = input_layer_lib.Input(shape=(1,))
layer = MyLayer()
_ = layer(x1)
self.assertEqual(len(layer.updates), 2)
x2 = input_layer_lib.Input(shape=(1,))
y2 = layer(x2)
self.assertEqual(len(layer.updates), 3)
network = functional.Functional(x2, y2)
self.assertEqual(len(network.updates), 3)
x3 = input_layer_lib.Input(shape=(1,))
_ = layer(x3)
self.assertEqual(len(network.updates), 4)
x4 = input_layer_lib.Input(shape=(1,))
_ = network(x4)
self.assertEqual(len(network.updates), 5)
network.add_update(state_ops.assign_add(layer.a, [[1]]))
self.assertEqual(len(network.updates), 6)
network.add_update(state_ops.assign_add(layer.b, x4), inputs=True)
self.assertEqual(len(network.updates), 7)
@combinations.generate(combinations.combine(mode=['graph']))
def test_get_updates_bn(self):
x1 = input_layer_lib.Input(shape=(1,))
layer = layers.BatchNormalization()
_ = layer(x1)
self.assertEqual(len(layer.updates), 2)
def test_get_layer(self):
# create a simple network
x = input_layer_lib.Input(shape=(32,))
dense_a = layers.Dense(4, name='dense_a')
dense_b = layers.Dense(2, name='dense_b')
y = dense_b(dense_a(x))
network = functional.Functional(x, y, name='dense_network')
# test various get_layer by index
self.assertEqual(network.get_layer(index=1), dense_a)
# test invalid get_layer by index
with self.assertRaisesRegex(
ValueError, 'Was asked to retrieve layer at index ' + str(3) +
' but model only has ' + str(len(network.layers)) + ' layers.'):
network.get_layer(index=3)
# test that only one between name and index is requested
with self.assertRaisesRegex(ValueError,
'Provide only a layer name or a layer index'):
network.get_layer(index=1, name='dense_b')
# test that a name or an index must be provided
with self.assertRaisesRegex(ValueError,
'Provide either a layer name or layer index.'):
network.get_layer()
# test various get_layer by name
self.assertEqual(network.get_layer(name='dense_a'), dense_a)
# test invalid get_layer by name
with self.assertRaisesRegex(ValueError, 'No such layer: dense_c.'):
network.get_layer(name='dense_c')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testTopologicalAttributes(self):
# test layer attributes / methods related to cross-layer connectivity.
a = input_layer_lib.Input(shape=(32,), name='input_a')
b = input_layer_lib.Input(shape=(32,), name='input_b')
# test input, output, input_shape, output_shape
test_layer = layers.Dense(16, name='test_layer')
a_test = test_layer(a)
self.assertIs(test_layer.input, a)
self.assertIs(test_layer.output, a_test)
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, (None, 16))
# test `get_*_at` methods
dense = layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
self.assertIs(dense.get_input_at(0), a)
self.assertIs(dense.get_input_at(1), b)
self.assertIs(dense.get_output_at(0), a_2)
self.assertIs(dense.get_output_at(1), b_2)
self.assertEqual(dense.get_input_shape_at(0), (None, 32))
self.assertEqual(dense.get_input_shape_at(1), (None, 32))
self.assertEqual(dense.get_output_shape_at(0), (None, 16))
self.assertEqual(dense.get_output_shape_at(1), (None, 16))
# Test invalid value for attribute retrieval.
with self.assertRaises(ValueError):
dense.get_input_at(2)
with self.assertRaises(AttributeError):
new_dense = layers.Dense(16)
_ = new_dense.input
with self.assertRaises(AttributeError):
new_dense = layers.Dense(16)
_ = new_dense.output
with self.assertRaises(AttributeError):
new_dense = layers.Dense(16)
_ = new_dense.output_shape
with self.assertRaises(AttributeError):
new_dense = layers.Dense(16)
_ = new_dense.input_shape
with self.assertRaises(AttributeError):
new_dense = layers.Dense(16)
a = input_layer_lib.Input(shape=(3, 32))
a = input_layer_lib.Input(shape=(5, 32))
a_2 = dense(a)
b_2 = dense(b)
_ = new_dense.input_shape
with self.assertRaises(AttributeError):
new_dense = layers.Dense(16)
a = input_layer_lib.Input(shape=(3, 32))
a = input_layer_lib.Input(shape=(5, 32))
a_2 = dense(a)
b_2 = dense(b)
_ = new_dense.output_shape
def _assertAllIs(self, a, b):
self.assertTrue(all(x is y for x, y in zip(a, b)))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testTopologicalAttributesMultiOutputLayer(self):
class PowersLayer(layers.Layer):
def call(self, inputs):
return [inputs**2, inputs**3]
x = input_layer_lib.Input(shape=(32,))
test_layer = PowersLayer()
p1, p2 = test_layer(x) # pylint: disable=not-callable
self.assertIs(test_layer.input, x)
self._assertAllIs(test_layer.output, [p1, p2])
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testTopologicalAttributesMultiInputLayer(self):
class AddLayer(layers.Layer):
def call(self, inputs):
assert len(inputs) == 2
return inputs[0] + inputs[1]
a = input_layer_lib.Input(shape=(32,))
b = input_layer_lib.Input(shape=(32,))
test_layer = AddLayer()
y = test_layer([a, b]) # pylint: disable=not-callable
self._assertAllIs(test_layer.input, [a, b])
self.assertIs(test_layer.output, y)
self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)])
self.assertEqual(test_layer.output_shape, (None, 32))
def testBasicNetwork(self):
with ops.Graph().as_default():
# minimum viable network
x = input_layer_lib.Input(shape=(32,))
dense = layers.Dense(2)
y = dense(x)
network = functional.Functional(x, y, name='dense_network')
# test basic attributes
self.assertEqual(network.name, 'dense_network')
self.assertEqual(len(network.layers), 2) # InputLayer + Dense
self.assertEqual(network.layers[1], dense)
self._assertAllIs(network.weights, dense.weights)
self._assertAllIs(network.trainable_weights, dense.trainable_weights)
self._assertAllIs(network.non_trainable_weights,
dense.non_trainable_weights)
# test callability on Input
x_2 = input_layer_lib.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.shape.as_list(), [None, 2])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.shape.as_list(), [None, 2])
# test network `trainable` attribute
network.trainable = False
self._assertAllIs(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, [])
self._assertAllIs(network.non_trainable_weights,
dense.trainable_weights + dense.non_trainable_weights)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_trainable_weights(self):
a = layers.Input(shape=(2,))
b = layers.Dense(1)(a)
model = training_lib.Model(a, b)
weights = model.weights
self._assertAllIs(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
self._assertAllIs(model.non_trainable_weights, weights)
model.trainable = True
self._assertAllIs(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.layers[1].trainable = False
self.assertListEqual(model.trainable_weights, [])
self._assertAllIs(model.non_trainable_weights, weights)
# sequential model
model = sequential.Sequential()
model.add(layers.Dense(1, input_dim=2))
weights = model.weights
self._assertAllIs(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
self._assertAllIs(model.non_trainable_weights, weights)
model.trainable = True
self._assertAllIs(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.layers[0].trainable = False
self.assertListEqual(model.trainable_weights, [])
self._assertAllIs(model.non_trainable_weights, weights)
def test_layer_call_arguments(self):
with ops.Graph().as_default():
# Test the ability to pass and serialize arguments to `call`.
inp = layers.Input(shape=(2,))
x = layers.Dense(3)(inp)
x = layers.Dropout(0.5)(x, training=True)
model = training_lib.Model(inp, x)
# Would be `dropout/cond/Merge` by default
self.assertIn('dropout', model.output.op.name)
# Test that argument is kept when applying the model
inp2 = layers.Input(shape=(2,))
out2 = model(inp2)
self.assertIn('dropout', out2.op.name)
# Test that argument is kept after loading a model
config = model.get_config()
model = training_lib.Model.from_config(config)
self.assertIn('dropout', model.output.op.name)
def test_node_construction(self):
# test basics
a = layers.Input(shape=(32,), name='input_a')
b = layers.Input(shape=(32,), name='input_b')
with self.assertRaises(ValueError):
_ = layers.Input(shape=(32,), batch_shape=(10, 32))
with self.assertRaises(ValueError):
_ = layers.Input(shape=(32,), unknown_kwarg=None)
self.assertListEqual(a.shape.as_list(), [None, 32])
a_layer, a_node_index, a_tensor_index = a._keras_history
b_layer, _, _ = b._keras_history
self.assertEqual(len(a_layer._inbound_nodes), 1)
self.assertEqual(a_tensor_index, 0)
node = a_layer._inbound_nodes[a_node_index]
self.assertEqual(node.outbound_layer, a_layer)
self.assertListEqual(node.inbound_layers, [])
self.assertListEqual(node.input_tensors, [a])
self.assertListEqual(node.input_shapes, [(None, 32)])
self.assertListEqual(node.output_tensors, [a])
self.assertListEqual(node.output_shapes, [(None, 32)])
dense = layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
self.assertEqual(len(dense._inbound_nodes), 2)
self.assertEqual(len(dense._outbound_nodes), 0)
self.assertEqual(dense._inbound_nodes[0].inbound_layers, a_layer)
self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense)
self.assertEqual(dense._inbound_nodes[1].inbound_layers, b_layer)
self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense)
self.assertIs(dense._inbound_nodes[0].input_tensors, a)
self.assertIs(dense._inbound_nodes[1].input_tensors, b)
# test layer properties
test_layer = layers.Dense(16, name='test_layer')
a_test = test_layer(a)
self.assertListEqual(test_layer.kernel.shape.as_list(), [32, 16])
self.assertIs(test_layer.input, a)
self.assertIs(test_layer.output, a_test)
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, (None, 16))
self.assertIs(dense.get_input_at(0), a)
self.assertIs(dense.get_input_at(1), b)
self.assertIs(dense.get_output_at(0), a_2)
self.assertIs(dense.get_output_at(1), b_2)
self.assertEqual(dense.get_input_shape_at(0), (None, 32))
self.assertEqual(dense.get_input_shape_at(1), (None, 32))
self.assertEqual(dense.get_output_shape_at(0), (None, 16))
self.assertEqual(dense.get_output_shape_at(1), (None, 16))
self.assertEqual(dense.get_input_mask_at(0), None)
self.assertEqual(dense.get_input_mask_at(1), None)
self.assertEqual(dense.get_output_mask_at(0), None)
self.assertEqual(dense.get_output_mask_at(1), None)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_multi_input_layer(self):
with self.cached_session():
# test multi-input layer
a = layers.Input(shape=(32,), name='input_a')
b = layers.Input(shape=(32,), name='input_b')
dense = layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = layers.concatenate([a_2, b_2], name='merge')
self.assertListEqual(merged.shape.as_list(), [None, 16 * 2])
merge_layer, merge_node_index, merge_tensor_index = merged._keras_history
self.assertEqual(merge_node_index, 0)
self.assertEqual(merge_tensor_index, 0)
self.assertEqual(len(merge_layer._inbound_nodes), 1)
self.assertEqual(len(merge_layer._outbound_nodes), 0)
self.assertEqual(len(merge_layer._inbound_nodes[0].input_tensors), 2)
self.assertEqual(len(merge_layer._inbound_nodes[0].inbound_layers), 2)
c = layers.Dense(64, name='dense_2')(merged)
d = layers.Dense(5, name='dense_3')(c)
model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model')
self.assertEqual(len(model.layers), 6)
output_shapes = model.compute_output_shape([(None, 32), (None, 32)])
self.assertListEqual(output_shapes[0].as_list(), [None, 64])
self.assertListEqual(output_shapes[1].as_list(), [None, 5])
self.assertListEqual(
model.compute_mask([a, b], [None, None]), [None, None])
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
self.assertListEqual([l.name for l in model.layers][2:],
['dense_1', 'merge', 'dense_2', 'dense_3'])
self.assertListEqual([l.name for l in model._input_layers],
['input_a', 'input_b'])
self.assertListEqual([l.name for l in model._output_layers],
['dense_2', 'dense_3'])
# actually run model
fn = backend.function(model.inputs, model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
# test get_source_inputs
self._assertAllIs(layer_utils.get_source_inputs(c), [a, b])
# serialization / deserialization
json_config = model.to_json()
recreated_model = models.model_from_json(json_config)
recreated_model.compile('rmsprop', 'mse')
self.assertListEqual([l.name for l in recreated_model.layers][2:],
['dense_1', 'merge', 'dense_2', 'dense_3'])
self.assertListEqual([l.name for l in recreated_model._input_layers],
['input_a', 'input_b'])
self.assertListEqual([l.name for l in recreated_model._output_layers],
['dense_2', 'dense_3'])
fn = backend.function(recreated_model.inputs, recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
def test_multi_output_layer_output_names(self):
inp = layers.Input(name='inp', shape=(None,), dtype=dtypes.float32)
class _MultiOutput(layers.Layer):
def call(self, x):
return x + 1., x + 2.
out = _MultiOutput(name='out')(inp)
model = training_lib.Model(inp, out)
self.assertEqual(['out', 'out_1'], model.output_names)
self.assertAllClose([2., 3.], model(1.))
def test_recursion(self):
with ops.Graph().as_default(), self.cached_session():
a = layers.Input(shape=(32,), name='input_a')
b = layers.Input(shape=(32,), name='input_b')
dense = layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = layers.concatenate([a_2, b_2], name='merge')
c = layers.Dense(64, name='dense_2')(merged)
d = layers.Dense(5, name='dense_3')(c)
model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model')
e = layers.Input(shape=(32,), name='input_e')
f = layers.Input(shape=(32,), name='input_f')
self.assertEqual(len(model.inputs), 2)
g, h = model([e, f])
self.assertEqual(len(model.inputs), 2)
self.assertEqual(g.name, 'model/dense_2/BiasAdd:0')
self.assertListEqual(g.shape.as_list(), c.shape.as_list())
self.assertListEqual(h.shape.as_list(), d.shape.as_list())
# test separate manipulation of different layer outputs
i = layers.Dense(7, name='dense_4')(h)
final_model = training_lib.Model(
inputs=[e, f], outputs=[i, g], name='final')
self.assertEqual(len(final_model.inputs), 2)
self.assertEqual(len(final_model.outputs), 2)
self.assertEqual(len(final_model.layers), 4)
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
self.assertListEqual([layer.name for layer in final_model.layers][2:],
['model', 'dense_4'])
self.assertListEqual(
model.compute_mask([e, f], [None, None]), [None, None])
self.assertListEqual(
final_model.compute_output_shape([(10, 32), (10, 32)]), [(10, 7),
(10, 64)])
# run recursive model
fn = backend.function(final_model.inputs, final_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
# test serialization
model_config = final_model.get_config()
recreated_model = models.Model.from_config(model_config)
fn = backend.function(recreated_model.inputs, recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_multi_input_multi_output_recursion(self):
with self.cached_session():
# test multi-input multi-output
a = layers.Input(shape=(32,), name='input_a')
b = layers.Input(shape=(32,), name='input_b')
dense = layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = layers.concatenate([a_2, b_2], name='merge')
c = layers.Dense(64, name='dense_2')(merged)
d = layers.Dense(5, name='dense_3')(c)
model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model')
j = layers.Input(shape=(32,), name='input_j')
k = layers.Input(shape=(32,), name='input_k')
_, n = model([j, k])
o = layers.Input(shape=(32,), name='input_o')
p = layers.Input(shape=(32,), name='input_p')
q, _ = model([o, p])
self.assertListEqual(n.shape.as_list(), [None, 5])
self.assertListEqual(q.shape.as_list(), [None, 64])
s = layers.concatenate([n, q], name='merge_nq')
self.assertListEqual(s.shape.as_list(), [None, 64 + 5])
# test with single output as 1-elem list
multi_io_model = training_lib.Model([j, k, o, p], [s])
fn = backend.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
# test with single output as tensor
multi_io_model = training_lib.Model([j, k, o, p], s)
fn = backend.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
# note that the output of the function will still be a 1-elem list
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
# test serialization
model_config = multi_io_model.get_config()
recreated_model = models.Model.from_config(model_config)
fn = backend.function(recreated_model.inputs, recreated_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
# note that the output of the function will still be a 1-elem list
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
config = model.get_config()
models.Model.from_config(config)
model.summary()
json_str = model.to_json()
models.model_from_json(json_str)
if yaml is not None:
yaml_str = model.to_yaml()
models.model_from_yaml(yaml_str)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_invalid_graphs(self):
a = layers.Input(shape=(32,), name='input_a')
b = layers.Input(shape=(32,), name='input_b')
dense = layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = layers.concatenate([a_2, b_2], name='merge')
c = layers.Dense(64, name='dense_2')(merged)
d = layers.Dense(5, name='dense_3')(c)
model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model')
# input is not an Input tensor
j = layers.Input(shape=(32,), name='input_j')
j = layers.Dense(32)(j)
k = layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
training_lib.Model([j, k], [m, n])
# disconnected graph
j = layers.Input(shape=(32,), name='input_j')
k = layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
training_lib.Model([j], [m, n])
# redundant outputs
j = layers.Input(shape=(32,), name='input_j')
k = layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
training_lib.Model([j, k], [m, n, n])
# redundant inputs
j = layers.Input(shape=(32,), name='input_j')
k = layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
training_lib.Model([j, k, j], [m, n])
    # I have no idea what I'm doing: garbage as inputs/outputs
j = layers.Input(shape=(32,), name='input_j')
k = layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
training_lib.Model([j, k], [m, n, 0])
def test_raw_tf_compatibility(self):
with ops.Graph().as_default():
# test calling layers/models on TF tensors
a = layers.Input(shape=(32,), name='input_a')
b = layers.Input(shape=(32,), name='input_b')
dense = layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = layers.concatenate([a_2, b_2], name='merge')
c = layers.Dense(64, name='dense_2')(merged)
d = layers.Dense(5, name='dense_3')(c)
model = training_lib.Model(inputs=[a, b], outputs=[c, d], name='model')
j = layers.Input(shape=(32,), name='input_j')
k = layers.Input(shape=(32,), name='input_k')
self.assertEqual(len(model.inputs), 2)
m, n = model([j, k])
self.assertEqual(len(model.inputs), 2)
tf_model = training_lib.Model([j, k], [m, n])
j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
m_tf, n_tf = tf_model([j_tf, k_tf])
self.assertListEqual(m_tf.shape.as_list(), [None, 64])
self.assertListEqual(n_tf.shape.as_list(), [None, 5])
# test merge
layers.concatenate([j_tf, k_tf], axis=1)
layers.add([j_tf, k_tf])
# test tensor input
x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32)
layers.InputLayer(input_tensor=x)
x = layers.Input(tensor=x)
layers.Dense(2)(x)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_basic_masking(self):
a = layers.Input(shape=(10, 32), name='input_a')
b = layers.Masking()(a)
model = training_lib.Model(a, b)
self.assertEqual(model.output_mask.shape.as_list(), [None, 10])
def testMaskingSingleInput(self):
class MaskedLayer(layers.Layer):
def call(self, inputs, mask=None):
if mask is not None:
return inputs * mask
return inputs
def compute_mask(self, inputs, mask=None):
return array_ops.ones_like(inputs)
if context.executing_eagerly():
a = constant_op.constant([2] * 32)
mask = constant_op.constant([0, 1] * 16)
a._keras_mask = mask
b = MaskedLayer().apply(a)
self.assertTrue(hasattr(b, '_keras_mask'))
self.assertAllEqual(
self.evaluate(array_ops.ones_like(mask)),
self.evaluate(getattr(b, '_keras_mask')))
self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b))
else:
x = input_layer_lib.Input(shape=(32,))
y = MaskedLayer()(x) # pylint: disable=not-callable
network = functional.Functional(x, y)
# test callability on Input
x_2 = input_layer_lib.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.shape.as_list(), [None, 32])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.shape.as_list(), [None, 32])
def test_activity_regularization_with_model_composition(self):
def reg(x):
return math_ops.reduce_sum(x)
net_a_input = input_layer_lib.Input((2,))
net_a = net_a_input
net_a = layers.Dense(
2, kernel_initializer='ones', use_bias=False, activity_regularizer=reg)(
net_a)
model_a = training_lib.Model([net_a_input], [net_a])
net_b_input = input_layer_lib.Input((2,))
net_b = model_a(net_b_input)
model_b = training_lib.Model([net_b_input], [net_b])
model_b.compile(optimizer='sgd', loss=None)
x = np.ones((1, 2))
loss = model_b.evaluate(x)
self.assertEqual(loss, 4.)
@combinations.generate(combinations.keras_mode_combinations())
def test_layer_sharing_at_heterogenous_depth(self):
x_val = np.random.random((10, 5))
x = input_layer_lib.Input(shape=(5,))
a = layers.Dense(5, name='A')
b = layers.Dense(5, name='B')
output = a(b(a(b(x))))
m = training_lib.Model(x, output)
m.run_eagerly = testing_utils.should_run_eagerly()
output_val = m.predict(x_val)
config = m.get_config()
weights = m.get_weights()
m2 = models.Model.from_config(config)
m2.set_weights(weights)
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
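  # Note: "heterogenous depth" in the test above means the same Dense layers A
  # and B are each reused at two different depths of one graph
  # (x -> B -> A -> B -> A), so get_config()/from_config() must record node
  # indices rather than just layer names to rebuild the topology exactly.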
@combinations.generate(combinations.keras_mode_combinations())
def test_layer_sharing_at_heterogenous_depth_with_concat(self):
input_shape = (16, 9, 3)
input_layer = input_layer_lib.Input(shape=input_shape)
a = layers.Dense(3, name='dense_A')
b = layers.Dense(3, name='dense_B')
c = layers.Dense(3, name='dense_C')
x1 = b(a(input_layer))
x2 = a(c(input_layer))
output = layers.concatenate([x1, x2])
m = training_lib.Model(inputs=input_layer, outputs=output)
m.run_eagerly = testing_utils.should_run_eagerly()
x_val = np.random.random((10, 16, 9, 3))
output_val = m.predict(x_val)
config = m.get_config()
weights = m.get_weights()
m2 = models.Model.from_config(config)
m2.set_weights(weights)
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
@combinations.generate(combinations.keras_mode_combinations())
def test_explicit_training_argument(self):
a = layers.Input(shape=(2,))
b = layers.Dropout(0.5)(a)
base_model = training_lib.Model(a, b)
a = layers.Input(shape=(2,))
b = base_model(a, training=False)
model = training_lib.Model(a, b)
x = np.ones((100, 2))
y = np.ones((100, 2))
model.compile(
optimizer='sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0) # In inference mode, output is equal to input.
a = layers.Input(shape=(2,))
b = base_model(a, training=True)
model = training_lib.Model(a, b)
preds = model.predict(x)
self.assertEqual(np.min(preds), 0.) # At least one unit was dropped.
@combinations.generate(combinations.keras_mode_combinations())
def test_mask_derived_from_keras_layer(self):
inputs = input_layer_lib.Input((5, 10))
mask = input_layer_lib.Input((5,))
outputs = layers.RNN(layers.LSTMCell(100))(inputs, mask=mask)
model = training_lib.Model([inputs, mask], outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[np.ones((10, 5, 10)), np.zeros((10, 5))],
y=np.zeros((10, 100)),
batch_size=2)
# All data is masked, returned values are 0's.
self.assertEqual(history.history['loss'][0], 0.0)
history = model.fit(
x=[np.ones((10, 5, 10)), np.ones((10, 5))],
y=np.zeros((10, 100)),
batch_size=2)
# Data is not masked, returned values are random.
self.assertGreater(history.history['loss'][0], 0.0)
model = training_lib.Model.from_config(model.get_config())
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[np.ones((10, 5, 10)), np.zeros((10, 5))],
y=np.zeros((10, 100)),
batch_size=2)
# All data is masked, returned values are 0's.
self.assertEqual(history.history['loss'][0], 0.0)
history = model.fit(
x=[np.ones((10, 5, 10)), np.ones((10, 5))],
y=np.zeros((10, 100)),
batch_size=2)
# Data is not masked, returned values are random.
self.assertGreater(history.history['loss'][0], 0.0)
@combinations.generate(combinations.keras_mode_combinations())
def test_call_arg_derived_from_keras_layer(self):
class MyAdd(layers.Layer):
def call(self, x1, x2):
return x1 + x2
input1 = input_layer_lib.Input(10)
input2 = input_layer_lib.Input(10)
outputs = MyAdd()(input1, input2)
model = training_lib.Model([input1, input2], outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],
y=10 * np.ones((10, 10)),
batch_size=2)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
# Check serialization.
model = training_lib.Model.from_config(
model.get_config(), custom_objects={'MyAdd': MyAdd})
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],
y=10 * np.ones((10, 10)),
batch_size=2)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
@combinations.generate(combinations.times(
combinations.keras_mode_combinations(mode='eager'),
combinations.combine(use_keras_tensors=False)))
def test_only_some_in_first_arg_derived_from_keras_layer(self):
class MyAddAll(layers.Layer):
def call(self, inputs):
x = inputs[0]
for inp in inputs[1:]:
if inp is not None:
x = x + inp
return x
input1 = input_layer_lib.Input(10)
input2 = input_layer_lib.Input(10)
layer = MyAddAll()
    with self.assertRaisesRegex(ValueError, 'construct a functional'):
layer([0.0, input1, None, input2, None])
@combinations.generate(combinations.times(
combinations.keras_mode_combinations(mode='eager'),
combinations.combine(use_keras_tensors=True)))
def test_only_some_in_first_arg_derived_from_keras_layer_keras_tensors(self):
# This functionality is unsupported in v1 graphs
class MyAddAll(layers.Layer):
def call(self, inputs):
x = inputs[0]
for inp in inputs[1:]:
if inp is not None:
x = x + inp
return x
input1 = input_layer_lib.Input(10)
input2 = input_layer_lib.Input(10)
layer = MyAddAll()
outputs = layer([0.0, input1, None, input2, None])
model = training_lib.Model([input1, input2], outputs)
self.assertIn(layer, model.layers)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],
y=10 * np.ones((10, 10)),
batch_size=2)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
# Check serialization.
model = training_lib.Model.from_config(
model.get_config(), custom_objects={'MyAddAll': MyAddAll})
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],
y=10 * np.ones((10, 10)),
batch_size=2)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
@combinations.generate(
combinations.times(
combinations.keras_mode_combinations(),
combinations.combine(share_already_used_layer=[True, False])))
def test_call_kwarg_derived_from_keras_layer(self, share_already_used_layer):
class MaybeAdd(layers.Layer):
def call(self, x1, x2=None):
if x2 is not None:
return x1 + x2
return x1
class IdentityLayer(layers.Layer):
def call(self, x):
return x
input1 = input_layer_lib.Input(10)
input2 = input_layer_lib.Input(10)
identity_layer = IdentityLayer()
if share_already_used_layer:
# We have had model serialization/deserialization break in the past:
# when a layer was previously used to construct other functional models
# and had a non-empty list of inbound nodes before being used to define
# the model being serialized/deserialized.
# (The serialization/deserialization was not correctly adjusting
# the node_index serialization/deserialization).
# So, we explicitly test this case.
training_lib.Model([input1], identity_layer(input1))
outputs = MaybeAdd()(input1, x2=identity_layer(input2))
model = training_lib.Model([input1, input2], outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],
y=10 * np.ones((10, 10)),
batch_size=2)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
model = training_lib.Model.from_config(
model.get_config(),
custom_objects={
'MaybeAdd': MaybeAdd,
'IdentityLayer': IdentityLayer
})
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],
y=10 * np.ones((10, 10)),
batch_size=2)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
@combinations.generate(combinations.keras_mode_combinations())
def test_call_kwarg_dtype_serialization(self):
class Double(layers.Layer):
def call(self, x1, dtype=None):
return math_ops.cast(x1 + x1, dtype=dtype)
input1 = input_layer_lib.Input(10)
outputs = Double()(input1, dtype=dtypes.float16)
model = training_lib.Model([input1], outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10))],
y=6 * np.ones((10, 10)),
batch_size=2)
# Check that input was correctly doubled.
self.assertEqual(history.history['loss'][0], 0.0)
# Check the output dtype
self.assertEqual(model(array_ops.ones((3, 10))).dtype, dtypes.float16)
model = training_lib.Model.from_config(
model.get_config(), custom_objects={'Double': Double})
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10))],
y=6 * np.ones((10, 10)),
batch_size=2)
# Check that input was correctly doubled.
self.assertEqual(history.history['loss'][0], 0.0)
# Check the output dtype
self.assertEqual(model(array_ops.ones((3, 10))).dtype, dtypes.float16)
@combinations.generate(combinations.keras_mode_combinations())
def test_call_kwarg_nonserializable(self):
class Double(layers.Layer):
def call(self, x1, kwarg=None):
return x1 + x1
class NonSerializable(object):
def __init__(self, foo=None):
self.foo = foo
input1 = input_layer_lib.Input(10)
outputs = Double()(input1, kwarg=NonSerializable())
model = training_lib.Model([input1], outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[3 * np.ones((10, 10))],
y=6 * np.ones((10, 10)),
batch_size=2)
# Check that input was correctly doubled.
self.assertEqual(history.history['loss'][0], 0.0)
with self.assertRaisesRegex(
TypeError, 'Layer double was passed non-JSON-serializable arguments.'):
model.get_config()
@combinations.generate(
combinations.times(
combinations.keras_mode_combinations(),
combinations.keras_tensor_combinations(),
combinations.combine(share_already_used_layer=[True, False])))
def test_call_kwarg_derived_from_keras_layer_and_first_arg_is_constant(
self, share_already_used_layer):
class IdentityLayer(layers.Layer):
def call(self, x):
return x
class MaybeAdd(layers.Layer):
def call(self, x1, x2=None):
if x2 is not None:
return x1 + x2
return x1
input2 = input_layer_lib.Input(10)
identity_layer = IdentityLayer()
if share_already_used_layer:
# We have had model serialization/deserialization break in the past:
# when a layer was previously used to construct other functional models
# and had a non-empty list of inbound nodes before being used to define
# the model being serialized/deserialized.
# (The serialization/deserialization was not correctly adjusting
# the node_index serialization/deserialization).
# So, we explicitly test this case.
training_lib.Model([input2], identity_layer(input2))
outputs = MaybeAdd()(3., x2=identity_layer(input2))
model = training_lib.Model([input2], outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=7 * np.ones((10, 10)),
y=10 * np.ones((10, 10)),
batch_size=2)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
model = training_lib.Model.from_config(
model.get_config(),
custom_objects={
'MaybeAdd': MaybeAdd,
'IdentityLayer': IdentityLayer
})
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=7 * np.ones((10, 10)),
y=10 * np.ones((10, 10)),
batch_size=2)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
@combinations.generate(combinations.keras_mode_combinations())
def test_composite_call_kwarg_derived_from_keras_layer(self):
# Create a test layer that accepts composite tensor inputs.
class MaybeAdd(layers.Layer):
def call(self, x1, x2=None):
# We need to convert this to a tensor for loss calculations -
# losses don't play nicely with ragged tensors yet.
if x2 is not None:
return (x1 + x2).to_tensor(default_value=0)
return x1.to_tensor(default_value=0)
input1 = input_layer_lib.Input((None,), ragged=True)
input2 = input_layer_lib.Input((None,), ragged=True)
outputs = MaybeAdd()(input1, x2=input2)
model = training_lib.Model([input1, input2], outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
input_data = [
ragged_factory_ops.constant([[3.0, 3.0], [3.0, 3.0], [3.0]]),
ragged_factory_ops.constant([[7.0, 7.0], [7.0, 7.0], [7.0]])
]
expected_data = np.array([[10.0, 10.0], [10.0, 10.0], [10.0, 0.0]])
history = model.fit(x=input_data, y=expected_data)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
model = training_lib.Model.from_config(
model.get_config(), custom_objects={'MaybeAdd': MaybeAdd})
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(x=input_data, y=expected_data)
# Check that second input was correctly added to first.
self.assertEqual(history.history['loss'][0], 0.0)
@combinations.generate(combinations.times(
combinations.keras_mode_combinations(mode='eager'),
combinations.keras_tensor_combinations()))
def test_call_some_not_all_nested_in_first_arg_derived_from_keras_layer(self):
# This functionality is unsupported in v1 graphs
class AddAll(layers.Layer):
def call(self, x1_x2, x3):
x1, x2 = x1_x2
out = x1 + x2
if x3 is not None:
for t in x3.values():
out += t
return out
input1 = input_layer_lib.Input(10)
input2 = input_layer_lib.Input(10)
input3 = input_layer_lib.Input(10)
layer = AddAll()
outputs = layer(
[input1, 4 * array_ops.ones((1, 10))],
x3={
'a': input2,
'b': input3,
'c': 5 * array_ops.ones((1, 10))
})
model = training_lib.Model([input1, input2, input3], outputs)
self.assertIn(layer, model.layers)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))],
y=15 * np.ones((10, 10)),
batch_size=2)
# Check that all inputs were correctly added.
self.assertEqual(history.history['loss'][0], 0.0)
model = training_lib.Model.from_config(
model.get_config(), custom_objects={'AddAll': AddAll})
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))],
y=15 * np.ones((10, 10)),
batch_size=2)
# Check that all inputs were correctly added.
self.assertEqual(history.history['loss'][0], 0.0)
@combinations.generate(combinations.keras_mode_combinations())
def test_call_nested_arg_derived_from_keras_layer(self):
class AddAll(layers.Layer):
def call(self, x1, x2, x3=None):
out = x1 + x2
if x3 is not None:
for t in x3.values():
out += t
return out
input1 = input_layer_lib.Input(10)
input2 = input_layer_lib.Input(10)
input3 = input_layer_lib.Input(10)
outputs = AddAll()(
input1,
4 * array_ops.ones((1, 10)),
x3={
'a': input2,
'b': input3,
'c': 5 * array_ops.ones((1, 10))
})
model = training_lib.Model([input1, input2, input3], outputs)
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))],
y=15 * np.ones((10, 10)),
batch_size=2)
# Check that all inputs were correctly added.
self.assertEqual(history.history['loss'][0], 0.0)
model = training_lib.Model.from_config(
model.get_config(), custom_objects={'AddAll': AddAll})
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
history = model.fit(
x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))],
y=15 * np.ones((10, 10)),
batch_size=2)
# Check that all inputs were correctly added.
self.assertEqual(history.history['loss'][0], 0.0)
@combinations.generate(combinations.keras_mode_combinations())
def test_multi_output_model_with_none_masking(self):
def func(x):
return [x * 0.2, x * 0.3]
def output_shape(input_shape):
return [input_shape, input_shape]
i = layers.Input(shape=(3, 2, 1))
o = layers.Lambda(function=func, output_shape=output_shape)(i)
self.assertEqual(backend.int_shape(o[0]), (None, 3, 2, 1))
self.assertEqual(backend.int_shape(o[1]), (None, 3, 2, 1))
o = layers.add(o)
model = training_lib.Model(i, o)
model.run_eagerly = testing_utils.should_run_eagerly()
i2 = layers.Input(shape=(3, 2, 1))
o2 = model(i2)
model2 = training_lib.Model(i2, o2)
model2.run_eagerly = testing_utils.should_run_eagerly()
x = np.random.random((4, 3, 2, 1))
out = model2.predict(x)
assert out.shape == (4, 3, 2, 1)
self.assertAllClose(out, x * 0.2 + x * 0.3, atol=1e-4)
@combinations.generate(combinations.keras_mode_combinations())
def test_constant_initializer_with_numpy(self):
initializer = initializers.Constant(np.ones((3, 2)))
model = sequential.Sequential()
model.add(layers.Dense(2, input_shape=(3,), kernel_initializer=initializer))
model.add(layers.Dense(3))
model.compile(
loss='mse',
optimizer='sgd',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly())
json_str = model.to_json()
models.model_from_json(json_str)
if yaml is not None:
yaml_str = model.to_yaml()
models.model_from_yaml(yaml_str)
def test_subclassed_error_if_init_not_called(self):
class MyNetwork(training_lib.Model):
def __init__(self):
self._foo = [layers.Dense(10), layers.Dense(10)]
with self.assertRaisesRegex(RuntimeError, 'forgot to call'):
MyNetwork()
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_int_input_shape(self):
inputs = input_layer_lib.Input(10)
self.assertEqual([None, 10], inputs.shape.as_list())
inputs_with_batch = input_layer_lib.Input(batch_size=20, shape=5)
self.assertEqual([20, 5], inputs_with_batch.shape.as_list())
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_model_initialization(self):
# Functional model
inputs = input_layer_lib.Input(shape=(32,))
outputs = layers.Dense(4)(inputs)
with self.assertRaisesRegex(TypeError,
'Keyword argument not understood'):
model = training_lib.Model(
inputs, outputs, name='m', trainable=False, dtype='int64')
with self.assertRaisesRegex(TypeError,
'Keyword argument not understood'):
model = training_lib.Model(
inputs, outputs, name='m', trainable=False, dynamic=False)
model = training_lib.Model(inputs, outputs, name='m', trainable=False)
self.assertEqual('m', model.name)
self.assertFalse(model.trainable)
self.assertFalse(model.dynamic)
class SubclassModel(training_lib.Model):
pass
# Subclassed model
model = SubclassModel(
name='subclassed', trainable=True, dtype='int64', dynamic=True)
self.assertEqual('subclassed', model.name)
self.assertTrue(model.dynamic)
self.assertTrue(model.trainable)
w = model.add_weight('w', [], initializer=initializers.Constant(1))
self.assertEqual(dtypes.int64, w.dtype)
def test_disconnected_inputs(self):
input_tensor1 = input_layer_lib.Input(shape=[200], name='a')
input_tensor2 = input_layer_lib.Input(shape=[10], name='b')
output_tensor1 = layers.Dense(units=10)(input_tensor1)
net = functional.Functional(
inputs=[input_tensor1, input_tensor2], outputs=[output_tensor1])
net2 = functional.Functional.from_config(net.get_config())
self.assertLen(net2.inputs, 2)
self.assertEqual('a', net2.layers[0].name)
self.assertEqual('b', net2.layers[1].name)
@combinations.generate(combinations.keras_model_type_combinations())
def test_dependency_tracking(self):
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
model.trackable = Checkpoint()
self.assertIn('trackable', model._unconditional_dependency_names)
self.assertEqual(model.trackable, model._lookup_dependency('trackable'))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_model_construction_in_tf_function(self):
d = {'model': None}
@def_function.function
def fn(x):
if d['model'] is None:
# Check that Functional can be built in a `tf.function`.
inputs = input_layer_lib.Input(10)
outputs = layers.Dense(1)(inputs)
model = functional.Functional(inputs, outputs)
d['model'] = model
else:
model = d['model']
return model(x)
x = array_ops.ones((10, 10))
y = fn(x)
self.assertEqual(y.shape.as_list(), [10, 1])
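    # Note: the d['model'] cache above matters because `fn` may be traced more
    # than once; building the Functional model only on the first trace lets
    # later traces reuse the same layers and weights.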
class DeferredModeTest(keras_parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testSimpleNetworkBuilding(self):
inputs = input_layer_lib.Input(shape=(32,))
if context.executing_eagerly():
self.assertEqual(inputs.dtype.name, 'float32')
self.assertEqual(inputs.shape.as_list(), [None, 32])
x = layers.Dense(2)(inputs)
if context.executing_eagerly():
self.assertEqual(x.dtype.name, 'float32')
self.assertEqual(x.shape.as_list(), [None, 2])
outputs = layers.Dense(4)(x)
network = functional.Functional(inputs, outputs)
self.assertIsInstance(network, functional.Functional)
if context.executing_eagerly():
# It should be possible to call such a network on EagerTensors.
inputs = constant_op.constant(
np.random.random((10, 32)).astype('float32'))
outputs = network(inputs)
self.assertEqual(outputs.shape.as_list(), [10, 4])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testMultiIONetworkBuilding(self):
input_a = input_layer_lib.Input(shape=(32,))
input_b = input_layer_lib.Input(shape=(16,))
a = layers.Dense(16)(input_a)
class AddLayer(layers.Layer):
def call(self, inputs):
return inputs[0] + inputs[1]
c = AddLayer()([a, input_b]) # pylint: disable=not-callable
c = layers.Dense(2)(c)
network = functional.Functional([input_a, input_b], [a, c])
if context.executing_eagerly():
a_val = constant_op.constant(
np.random.random((10, 32)).astype('float32'))
b_val = constant_op.constant(
np.random.random((10, 16)).astype('float32'))
outputs = network([a_val, b_val])
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].shape.as_list(), [10, 16])
self.assertEqual(outputs[1].shape.as_list(), [10, 2])
class DefaultShapeInferenceBehaviorTest(keras_parameterized.TestCase):
def _testShapeInference(self, model, input_shape, expected_output_shape):
input_value = np.random.random(input_shape)
output_value = model.predict(input_value)
self.assertEqual(output_value.shape, expected_output_shape)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testSingleInputCase(self):
class LayerWithOneInput(layers.Layer):
def build(self, input_shape):
self.w = array_ops.ones(shape=(3, 4))
def call(self, inputs):
return backend.dot(inputs, self.w)
inputs = input_layer_lib.Input(shape=(3,))
layer = LayerWithOneInput()
if context.executing_eagerly():
self.assertEqual(
layer.compute_output_shape((None, 3)).as_list(), [None, 4])
# As a side-effect, compute_output_shape builds the layer.
self.assertTrue(layer.built)
# We can still query the layer's compute_output_shape with compatible
# input shapes.
self.assertEqual(
layer.compute_output_shape((6, 3)).as_list(), [6, 4])
outputs = layer(inputs)
model = training_lib.Model(inputs, outputs)
self._testShapeInference(model, (2, 3), (2, 4))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testMultiInputOutputCase(self):
class MultiInputOutputLayer(layers.Layer):
def build(self, input_shape):
self.w = array_ops.ones(shape=(3, 4))
def call(self, inputs):
a = backend.dot(inputs[0], self.w)
b = a + inputs[1]
return [a, b]
input_a = input_layer_lib.Input(shape=(3,))
input_b = input_layer_lib.Input(shape=(4,))
output_a, output_b = MultiInputOutputLayer()([input_a, input_b])
model = training_lib.Model([input_a, input_b], [output_a, output_b])
output_a_val, output_b_val = model.predict(
[np.random.random((2, 3)), np.random.random((2, 4))])
self.assertEqual(output_a_val.shape, (2, 4))
self.assertEqual(output_b_val.shape, (2, 4))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testTrainingArgument(self):
class LayerWithTrainingArg(layers.Layer):
def build(self, input_shape):
self.w = array_ops.ones(shape=(3, 4))
def call(self, inputs, training):
return backend.dot(inputs, self.w)
inputs = input_layer_lib.Input(shape=(3,))
outputs = LayerWithTrainingArg()(inputs, training=False)
model = training_lib.Model(inputs, outputs)
self._testShapeInference(model, (2, 3), (2, 4))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testNoneInShape(self):
class Model(training_lib.Model):
def __init__(self):
super(Model, self).__init__()
self.conv1 = layers.Conv2D(8, 3)
self.pool = layers.GlobalAveragePooling2D()
self.fc = layers.Dense(3)
def call(self, x):
x = self.conv1(x)
x = self.pool(x)
x = self.fc(x)
return x
model = Model()
model.build(tensor_shape.TensorShape((None, None, None, 1)))
self.assertTrue(model.built, 'Model should be built')
self.assertTrue(model.weights,
'Model should have its weights created as it '
'has been built')
sample_input = array_ops.ones((1, 10, 10, 1))
output = model(sample_input)
self.assertEqual(output.shape, (1, 3))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testNoneInShapeWithCompoundModel(self):
class BasicBlock(training_lib.Model):
def __init__(self):
super(BasicBlock, self).__init__()
self.conv1 = layers.Conv2D(8, 3)
self.pool = layers.GlobalAveragePooling2D()
self.dense = layers.Dense(3)
def call(self, x):
x = self.conv1(x)
x = self.pool(x)
x = self.dense(x)
return x
class CompoundModel(training_lib.Model):
def __init__(self):
super(CompoundModel, self).__init__()
self.block = BasicBlock()
def call(self, x):
x = self.block(x) # pylint: disable=not-callable
return x
model = CompoundModel()
model.build(tensor_shape.TensorShape((None, None, None, 1)))
self.assertTrue(model.built, 'Model should be built')
self.assertTrue(model.weights,
'Model should have its weights created as it '
'has been built')
sample_input = array_ops.ones((1, 10, 10, 1))
output = model(sample_input) # pylint: disable=not-callable
self.assertEqual(output.shape, (1, 3))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testNoneInShapeWithFunctionalAPI(self):
class BasicBlock(training_lib.Model):
      # Subclassing training_lib.Model (itself a Layer subclass) since we are
      # calling this block inside a model created using the functional API.
def __init__(self):
super(BasicBlock, self).__init__()
self.conv1 = layers.Conv2D(8, 3)
def call(self, x):
x = self.conv1(x)
return x
input_layer = layers.Input(shape=(None, None, 1))
x = BasicBlock()(input_layer)
x = layers.GlobalAveragePooling2D()(x)
output_layer = layers.Dense(3)(x)
model = training_lib.Model(inputs=input_layer, outputs=output_layer)
model.build(tensor_shape.TensorShape((None, None, None, 1)))
self.assertTrue(model.built, 'Model should be built')
self.assertTrue(model.weights,
'Model should have its weights created as it '
'has been built')
sample_input = array_ops.ones((1, 10, 10, 1))
output = model(sample_input)
self.assertEqual(output.shape, (1, 3))
@combinations.generate(combinations.keras_mode_combinations())
def test_sequential_as_downstream_of_masking_layer(self):
inputs = layers.Input(shape=(3, 4))
x = layers.Masking(mask_value=0., input_shape=(3, 4))(inputs)
s = sequential.Sequential()
s.add(layers.Dense(5, input_shape=(4,)))
x = layers.wrappers.TimeDistributed(s)(x)
model = training_lib.Model(inputs=inputs, outputs=x)
model.compile(
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly())
model_input = np.random.randint(
low=1, high=5, size=(10, 3, 4)).astype('float32')
for i in range(4):
model_input[i, i:, :] = 0.
model.fit(model_input,
np.random.random((10, 3, 5)), epochs=1, batch_size=6)
if not context.executing_eagerly():
# Note: this doesn't work in eager due to DeferredTensor/ops compatibility
# issue.
mask_outputs = [model.layers[1].compute_mask(model.layers[1].input)]
mask_outputs += [model.layers[2].compute_mask(
model.layers[2].input, mask_outputs[-1])]
func = backend.function([model.input], mask_outputs)
mask_outputs_val = func([model_input])
self.assertAllClose(mask_outputs_val[0], np.any(model_input, axis=-1))
self.assertAllClose(mask_outputs_val[1], np.any(model_input, axis=-1))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_external_keras_serialization_compat_input_layers(self):
inputs = input_layer_lib.Input(shape=(10,))
outputs = layers.Dense(1)(inputs)
model = training_lib.Model(inputs, outputs)
config = model.get_config()
# Checks that single inputs and outputs are still saved as 1-element lists.
# Saving as 1-element lists or not is equivalent in TF Keras, but only the
# 1-element list format is supported in TF.js and keras-team/Keras.
self.assertLen(config['input_layers'], 1)
self.assertLen(config['output_layers'], 1)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_external_keras_serialization_compat_inbound_nodes(self):
# Check single Tensor input.
inputs = input_layer_lib.Input(shape=(10,), name='in')
outputs = layers.Dense(1)(inputs)
model = training_lib.Model(inputs, outputs)
config = model.get_config()
self.assertEqual(config['layers'][1]['inbound_nodes'], [[['in', 0, 0, {}]]])
# Check multiple Tensor input.
inputs1 = input_layer_lib.Input(shape=(10,), name='in1')
inputs2 = input_layer_lib.Input(shape=(10,), name='in2')
outputs = layers.Add()([inputs1, inputs2])
model = training_lib.Model([inputs1, inputs2], outputs)
config = model.get_config()
self.assertEqual(config['layers'][2]['inbound_nodes'],
[[['in1', 0, 0, {}], ['in2', 0, 0, {}]]])
@combinations.generate(combinations.combine(mode=['eager']))
def test_dict_inputs_tensors(self):
# Note that this test is running with v2 eager only, since the v1
# will behave differently wrt to dict input for training.
inputs = {
'sentence2': input_layer_lib.Input(
shape=(), name='a', dtype=dtypes.string),
'sentence1': input_layer_lib.Input(
shape=(), name='b', dtype=dtypes.string),
}
strlen = layers.Lambda(string_ops.string_length_v2)
diff = layers.Subtract()(
[strlen(inputs['sentence1']), strlen(inputs['sentence2'])])
diff = math_ops.cast(diff, dtypes.float32)
model = training_lib.Model(inputs, diff)
extra_keys = {
'sentence1': constant_op.constant(['brown fox', 'lazy dog']),
'sentence2': constant_op.constant(['owl', 'cheeky cat']),
'label': constant_op.constant([0, 1]),
}
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
model(extra_keys)
self.assertIn('ignored by the model', str(w[-1].message))
model.compile('sgd', 'mse')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
model.fit(extra_keys, y=constant_op.constant([0, 1]), steps_per_epoch=1)
self.assertIn('ignored by the model', str(w[-1].message))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
model.evaluate(extra_keys, constant_op.constant([0, 1]))
self.assertIn('ignored by the model', str(w[-1].message))
# Make sure the model inputs are sorted with the dict keys.
self.assertEqual(model.inputs[0]._keras_history.layer.name, 'b')
self.assertEqual(model.inputs[1]._keras_history.layer.name, 'a')
class GraphUtilsTest(test.TestCase):
def testGetReachableFromInputs(self):
with ops.Graph().as_default(), self.cached_session():
pl_1 = array_ops.placeholder(shape=None, dtype='float32')
pl_2 = array_ops.placeholder(shape=None, dtype='float32')
pl_3 = array_ops.placeholder(shape=None, dtype='float32')
x_1 = pl_1 + pl_2
x_2 = pl_2 * 2
x_3 = pl_3 + 1
x_4 = x_1 + x_2
x_5 = x_3 * pl_1
self.assertEqual(
tf_utils.get_reachable_from_inputs([pl_1]),
{pl_1, x_1, x_4, x_5, x_1.op, x_4.op, x_5.op})
self.assertEqual(
tf_utils.get_reachable_from_inputs([pl_1, pl_2]),
{pl_1, pl_2, x_1, x_2, x_4, x_5, x_1.op, x_2.op, x_4.op, x_5.op})
self.assertEqual(
tf_utils.get_reachable_from_inputs([pl_3]),
{pl_3, x_3, x_5, x_3.op, x_5.op})
self.assertEqual(
tf_utils.get_reachable_from_inputs([x_3]), {x_3, x_5, x_5.op})
class NestedNetworkTest(keras_parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_nested_inputs_network(self):
inputs = {
'x1': input_layer_lib.Input(shape=(1,)),
'x2': input_layer_lib.Input(shape=(1,))
}
outputs = layers.Add()([inputs['x1'], inputs['x2']])
network = functional.Functional(inputs, outputs)
network = functional.Functional.from_config(network.get_config())
result_tensor = network({
'x1': array_ops.ones((1, 1), 'float32'),
'x2': array_ops.ones((1, 1), 'float32')
})
result = self.evaluate(result_tensor)
self.assertAllEqual(result, [[2.]])
# TODO(b/122726584): Investigate why concrete batch is flaky in some builds.
output_shape = network.compute_output_shape({
'x1': (None, 1),
'x2': (None, 1)
})
self.assertListEqual(output_shape.as_list(), [None, 1])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_nested_outputs_network(self):
inputs = input_layer_lib.Input(shape=(1,))
outputs = {
'x+x': layers.Add()([inputs, inputs]),
'x*x': layers.Multiply()([inputs, inputs])
}
network = functional.Functional(inputs, outputs)
network = functional.Functional.from_config(network.get_config())
result_tensor = network(array_ops.ones((1, 1), 'float32'))
result = self.evaluate(result_tensor)
self.assertAllEqual(result['x+x'], [[2.]])
self.assertAllEqual(result['x*x'], [[1.]])
output_shape = network.compute_output_shape((None, 1))
self.assertListEqual(output_shape['x+x'].as_list(), [None, 1])
self.assertListEqual(output_shape['x*x'].as_list(), [None, 1])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_nested_network_inside_network(self):
inner_inputs = {
'x1': input_layer_lib.Input(shape=(1,)),
'x2': input_layer_lib.Input(shape=(1,))
}
inner_outputs = {
'x1+x2': layers.Add()([inner_inputs['x1'], inner_inputs['x2']]),
'x1*x2': layers.Multiply()([inner_inputs['x1'], inner_inputs['x2']])
}
inner_network = functional.Functional(
inner_inputs, inner_outputs)
inputs = [
input_layer_lib.Input(shape=(1,)),
input_layer_lib.Input(shape=(1,))
]
middle = inner_network({'x1': inputs[0], 'x2': inputs[1]})
outputs = layers.Add()([middle['x1+x2'], middle['x1*x2']])
network = functional.Functional(inputs, outputs)
network = functional.Functional.from_config(network.get_config())
# Computes: `(x1+x2) + (x1*x2)`
result_tensor = network(
[array_ops.ones((1, 1), 'float32'),
array_ops.ones((1, 1), 'float32')])
result = self.evaluate(result_tensor)
self.assertAllEqual(result, [[3.]])
output_shape = network.compute_output_shape([(None, 1), (None, 1)])
self.assertListEqual(output_shape.as_list(), [None, 1])
@combinations.generate(combinations.combine(mode=['graph']))
def test_updates_with_direct_call(self):
inputs = input_layer_lib.Input(shape=(10,))
x = layers.BatchNormalization()(inputs)
x = layers.Dense(10)(x)
model = training_lib.Model(inputs, x)
ph = backend.placeholder(shape=(10, 10))
model(ph)
self.assertLen(model.updates, 4)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_dict_mapping_input(self):
class ReturnFirst(layers.Layer):
def call(self, inputs):
b, _ = inputs
return b
    # Checks that inputs are put in the same order as the
    # Model was constructed with.
b = input_layer_lib.Input(shape=(10,), name='b')
a = input_layer_lib.Input(shape=(10,), name='a')
outputs = ReturnFirst()([b, a])
b_val = array_ops.ones((10, 10))
a_val = array_ops.zeros((10, 10))
model = training_lib.Model([b, a], outputs)
res = model({'a': a_val, 'b': b_val})
self.assertAllClose(self.evaluate(res), self.evaluate(b_val))
reversed_model = training_lib.Model([a, b], outputs)
res = reversed_model({'a': a_val, 'b': b_val})
self.assertAllClose(self.evaluate(res), self.evaluate(b_val))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_dict_mapping_single_input(self):
b = input_layer_lib.Input(shape=(1,), name='b')
outputs = b * 2
model = training_lib.Model(b, outputs)
b_val = array_ops.ones((1, 1))
extra_val = array_ops.ones((1, 10))
inputs = {'a': extra_val, 'b': b_val}
res = model(inputs)
# Check that 'b' was used and 'a' was ignored.
self.assertEqual(res.shape.as_list(), [1, 1])
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_nested_dict_mapping(self):
a = input_layer_lib.Input(shape=(1,), dtype='int32', name='a')
b = input_layer_lib.Input(shape=(1,), dtype='int32', name='b')
c = input_layer_lib.Input(shape=(1,), dtype='int32', name='c')
d = input_layer_lib.Input(shape=(1,), dtype='int32', name='d')
inputs = {'a': (a, b), 'c': (c, d)}
outputs = 1000 * a + 100 * b + 10 * c + d
model = training_lib.Model(inputs, outputs)
a_val = array_ops.ones((1, 1), dtype='int32')
b_val = 2 * array_ops.ones((1, 1), dtype='int32')
c_val = 3 * array_ops.ones((1, 1), dtype='int32')
d_val = 4 * array_ops.ones((1, 1), dtype='int32')
inputs_val = {'a': (a_val, b_val), 'c': (c_val, d_val)}
res = model(inputs_val)
# Check that inputs were flattened in the correct order.
self.assertFalse(model._enable_dict_to_input_mapping)
self.assertEqual(self.evaluate(res), [1234])
@combinations.generate(combinations.keras_mode_combinations())
class AddLossTest(keras_parameterized.TestCase):
def test_add_loss_outside_call_only_loss(self):
inputs = input_layer_lib.Input((10,))
mid = layers.Dense(10)(inputs)
outputs = layers.Dense(1)(mid)
model = training_lib.Model(inputs, outputs)
model.add_loss(math_ops.reduce_mean(outputs))
self.assertLen(model.losses, 1)
initial_weights = model.get_weights()
x = np.ones((10, 10))
model.compile(
'sgd',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, batch_size=2, epochs=1)
model2 = model.from_config(model.get_config())
model2.compile(
'sgd',
run_eagerly=testing_utils.should_run_eagerly())
model2.set_weights(initial_weights)
model2.fit(x, batch_size=2, epochs=1)
# The TFOpLayer and the AddLoss layer are serialized.
self.assertLen(model2.layers, 5)
self.assertAllClose(model.get_weights(), model2.get_weights())
def test_add_loss_outside_call_multiple_losses(self):
inputs = input_layer_lib.Input((10,))
x1 = layers.Dense(10)(inputs)
x2 = layers.Dense(10)(x1)
outputs = layers.Dense(1)(x2)
model = training_lib.Model(inputs, outputs)
model.add_loss(math_ops.reduce_sum(x1 * x2))
model.add_loss(math_ops.reduce_mean(outputs))
self.assertLen(model.losses, 2)
initial_weights = model.get_weights()
x, y = np.ones((10, 10)), np.ones((10, 1))
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
model.fit(x, y, batch_size=2, epochs=1)
model2 = model.from_config(model.get_config())
model2.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
model2.set_weights(initial_weights)
model2.fit(x, y, batch_size=2, epochs=1)
self.assertAllClose(model.get_weights(), model2.get_weights())
def test_add_loss_crossentropy_backtracking(self):
inputs = input_layer_lib.Input((2,))
labels = input_layer_lib.Input((1,))
outputs = layers.Dense(1, activation='sigmoid')(inputs)
model = functional.Functional([inputs, labels], outputs)
model.add_loss(losses.binary_crossentropy(labels, outputs))
model.compile('adam')
x = np.random.random((2, 2))
y = np.random.random((2, 1))
model.fit([x, y])
inputs = input_layer_lib.Input((2,))
labels = input_layer_lib.Input((2,))
outputs = layers.Dense(2, activation='softmax')(inputs)
model = functional.Functional([inputs, labels], outputs)
model.add_loss(losses.categorical_crossentropy(labels, outputs))
model.compile('adam')
x = np.random.random((2, 2))
y = np.random.random((2, 2))
model.fit([x, y])
inputs = input_layer_lib.Input((2,))
labels = input_layer_lib.Input((1,), dtype='int32')
outputs = layers.Dense(2, activation='softmax')(inputs)
model = functional.Functional([inputs, labels], outputs)
model.add_loss(losses.sparse_categorical_crossentropy(labels, outputs))
model.compile('adam')
x = np.random.random((2, 2))
y = np.random.randint(0, 2, size=(2, 1))
model.fit([x, y])
@combinations.generate(combinations.keras_mode_combinations())
class WeightAccessTest(keras_parameterized.TestCase):
def test_functional_model(self):
inputs = input_layer_lib.Input((10,))
x1 = layers.Dense(10)(inputs)
x2 = layers.Dense(10)(x1)
outputs = layers.Dense(1)(x2)
model = training_lib.Model(inputs, outputs)
self.assertEqual(len(model.weights), 6)
def test_sequential_model_with_input_shape(self):
x1 = layers.Dense(10, input_shape=(10,))
x2 = layers.Dense(10)
x3 = layers.Dense(1)
model = sequential.Sequential([x1, x2, x3])
self.assertEqual(len(model.weights), 6)
def test_sequential_model_without_input_shape(self):
x1 = layers.Dense(10)
x2 = layers.Dense(10)
x3 = layers.Dense(1)
model = sequential.Sequential([x1, x2, x3])
with self.assertRaisesRegex(
ValueError, 'Weights for model .* have not yet been created'):
_ = model.weights
def test_subclass_model_with_build_method(self):
class SubclassModel(models.Model):
def build(self, input_shape):
self.w = self.add_weight(shape=input_shape[-1], initializer='ones')
def call(self, inputs):
return inputs * self.w
model = SubclassModel()
with self.assertRaisesRegex(
ValueError, 'Weights for model .* have not yet been created'):
_ = model.weights
model(input_layer_lib.Input((10,)))
self.assertEqual(len(model.weights), 1)
def test_subclass_model_without_build_method(self):
class SubclassModel(models.Model):
def __init__(self):
super(SubclassModel, self).__init__()
self.w = self.add_weight(shape=(), initializer='ones')
def call(self, inputs):
return inputs * self.w
model = SubclassModel()
self.assertEqual(len(model.weights), 1)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class DTypeTest(keras_parameterized.TestCase):
@testing_utils.enable_v2_dtype_behavior
def test_graph_network_dtype(self):
inputs = input_layer_lib.Input((10,))
outputs = layers.Dense(10)(inputs)
network = functional.Functional(inputs, outputs)
self.assertEqual(network.dtype, 'float32')
@testing_utils.enable_v2_dtype_behavior
def test_subclassed_network_dtype(self):
class IdentityNetwork(training_lib.Model):
def call(self, inputs):
return inputs
network = IdentityNetwork()
self.assertEqual(network.dtype, 'float32')
self.assertEqual(network(array_ops.constant(1, 'float64')).dtype, 'float32')
network = IdentityNetwork(dtype='float16')
self.assertEqual(network.dtype, 'float16')
self.assertEqual(network(array_ops.constant(1, 'float64')).dtype, 'float16')
network = IdentityNetwork(autocast=False)
self.assertEqual(network.dtype, 'float32')
self.assertEqual(network(array_ops.constant(1, 'float64')).dtype, 'float64')
class AttrTrackingLayer(base_layer.Layer):
"""Count how many times `dynamic` and `stateful` are called.
These counts are used to test that the attribute cache behaves as expected.
"""
def __init__(self, *args, **kwargs):
self.stateful_count = 0
self.dynamic_count = 0
super(AttrTrackingLayer, self).__init__(*args, **kwargs)
@base_layer.Layer.stateful.getter
def stateful(self):
self.stateful_count += 1
return super(AttrTrackingLayer, self).stateful
@property
def dynamic(self):
self.dynamic_count += 1
return super(AttrTrackingLayer, self).dynamic
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CacheCorrectnessTest(keras_parameterized.TestCase):
def layer_and_network_test(self):
# Top level layer
network = functional.Functional()
layer_0 = AttrTrackingLayer()
sub_network = functional.Functional()
layer_1 = AttrTrackingLayer(dynamic=True)
layer_2 = AttrTrackingLayer()
sub_network.sub_layers = [layer_1, layer_2]
network.sub_layer = layer_0
for _ in range(2):
self.assertEqual(network.dynamic, False)
self.assertEqual(network.stateful, False)
# The second pass should be a cache hit.
self.assertEqual(layer_0.dynamic_count, 1)
self.assertEqual(layer_0.stateful_count, 1)
# Mutations of the sub-layer should force recalculation of the network's
    # stateful attribute (mutations bubble up).
layer_0.stateful = True
self.assertEqual(network.stateful, True)
self.assertEqual(layer_0.stateful_count, 2)
layer_0.stateful = False
self.assertEqual(network.stateful, False)
self.assertEqual(layer_0.stateful_count, 3)
# But changing stateful should not affect dynamic.
self.assertEqual(network.dynamic, False)
self.assertEqual(layer_0.dynamic_count, 1)
network.sub_network = sub_network
# Adding to the topology should invalidate the cache and reflect in the top
# level network.
self.assertEqual(network.dynamic, True)
self.assertEqual(layer_0.dynamic_count, 2)
self.assertEqual(layer_1.dynamic_count, 1)
# Still dynamic, but we need to recompute.
sub_network.sub_layers.pop()
self.assertEqual(network.dynamic, True)
self.assertEqual(layer_0.dynamic_count, 3)
self.assertEqual(layer_1.dynamic_count, 2)
# Now that we've removed the dynamic layer deep in the layer hierarchy, we
# need to make sure that that bubbles up through all the levels.
sub_network.sub_layers.pop()
self.assertEqual(network.dynamic, False)
self.assertEqual(layer_0.dynamic_count, 4)
self.assertEqual(layer_1.dynamic_count, 2)
# Now check with a tracked dict.
sub_network.sub_layers = {
"layer_1": layer_1,
"layer_2": layer_2,
}
self.assertEqual(network.dynamic, True)
self.assertEqual(layer_0.dynamic_count, 5)
self.assertEqual(layer_1.dynamic_count, 3)
# In-place assignment should still invalidate the cache.
sub_network.sub_layers["layer_1"] = layer_1
self.assertEqual(network.dynamic, True)
self.assertEqual(layer_0.dynamic_count, 6)
self.assertEqual(layer_1.dynamic_count, 4)
sub_network.sub_layers["layer_1"] = None
for _ in range(2):
self.assertEqual(network.dynamic, False)
self.assertEqual(layer_0.dynamic_count, 7)
self.assertEqual(layer_1.dynamic_count, 4)
layer_3 = AttrTrackingLayer()
layer_3.stateful = True
sub_network.sub_layers = None
self.assertEqual(network.dynamic, False)
self.assertEqual(network.stateful, False)
# Test duplicate layers.
sub_network.sub_layers = [layer_1, layer_1, layer_1, layer_3]
self.assertEqual(network.dynamic, True)
self.assertEqual(network.stateful, True)
for _ in range(3):
sub_network.sub_layers.pop()
self.assertEqual(network.dynamic, True)
self.assertEqual(network.stateful, False)
sub_network.sub_layers.pop()
self.assertEqual(network.dynamic, False)
self.assertEqual(network.stateful, False)
def test_compute_output_shape_cache(self):
# See https://github.com/tensorflow/tensorflow/issues/32029.
x = input_layer_lib.Input(shape=(None, 32))
dense = layers.Dense(2)
y = dense(x)
network = functional.Functional(x, y, name='dense_network')
for i in range(999, 1024):
self.assertEqual(network.compute_output_shape((1, i, 32)), (1, i, 2))
def test_2d_inputs_squeezed_to_1d(self):
input_1d = input_layer_lib.Input(shape=())
outputs = input_1d * 2.
net = functional.Functional(input_1d, outputs)
x = np.ones((10, 1))
y = net(x)
self.assertEqual(y.shape.rank, 1)
def test_1d_inputs_expanded_to_2d(self):
input_1d = input_layer_lib.Input(shape=(1,))
outputs = input_1d * 2.
net = functional.Functional(input_1d, outputs)
x = np.ones((10,))
y = net(x)
self.assertEqual(y.shape.rank, 2)
def test_training_passed_during_construction(self):
def _call(inputs, training):
if training is None:
return inputs * -1.0
elif training:
return inputs
else:
return inputs * 0.0
class MyLayer(base_layer.Layer):
def call(self, inputs, training=True):
return _call(inputs, training)
my_layer = MyLayer()
x = np.ones((1, 10))
# Hard-coded `true` value passed during construction is respected.
inputs = input_layer_lib.Input(10)
outputs = my_layer(inputs, training=True)
network = functional.Functional(inputs, outputs)
self.assertAllEqual(network(x, training=True), _call(x, True))
self.assertAllEqual(network(x, training=False), _call(x, True))
self.assertAllEqual(network(x), _call(x, True))
# Hard-coded `false` value passed during construction is respected.
inputs = input_layer_lib.Input(10)
outputs = my_layer(inputs, training=False)
network = functional.Functional(inputs, outputs)
self.assertAllEqual(network(x, training=True), _call(x, False))
self.assertAllEqual(network(x, training=False), _call(x, False))
self.assertAllEqual(network(x), _call(x, False))
if context.executing_eagerly():
      # In v2, construction still works when no `training` is specified.
      # When no value is passed during construction, the local default is used.
inputs = input_layer_lib.Input(10)
outputs = my_layer(inputs)
network = functional.Functional(inputs, outputs)
self.assertAllEqual(network(x, training=True), _call(x, True))
self.assertAllEqual(network(x, training=False), _call(x, False))
self.assertAllEqual(network(x), _call(x, True)) # Use local default
# `None` value passed positionally during construction is ignored at runtime
inputs = input_layer_lib.Input(10)
outputs = my_layer(inputs, None)
network = functional.Functional(inputs, outputs)
self.assertAllEqual(network(x, training=True), _call(x, True))
self.assertAllEqual(network(x, training=False), _call(x, False))
if context.executing_eagerly():
self.assertAllEqual(network(x), _call(x, True)) # Use local default
else:
# in v1 training would have defaulted to using the `None` inside the layer
# if training is not passed at runtime
self.assertAllEqual(network(x), _call(x, None))
# `None` value passed as kwarg during construction is ignored at runtime.
inputs = input_layer_lib.Input(10)
outputs = my_layer(inputs, training=None)
network = functional.Functional(inputs, outputs)
self.assertAllEqual(network(x, training=True), _call(x, True))
self.assertAllEqual(network(x, training=False), _call(x, False))
if context.executing_eagerly():
self.assertAllEqual(network(x), _call(x, True)) # Use local default
else:
# in v1 training would have defaulted to using the `None` inside the layer
# if training is not passed at runtime
self.assertAllEqual(network(x), _call(x, None))
class InputsOutputsErrorTest(keras_parameterized.TestCase):
@testing_utils.enable_v2_dtype_behavior
def test_input_error(self):
inputs = input_layer_lib.Input((10,))
outputs = layers.Dense(10)(inputs)
with self.assertRaisesRegex(
TypeError, "('Keyword argument not understood:', 'input')"):
models.Model(input=inputs, outputs=outputs)
@testing_utils.enable_v2_dtype_behavior
def test_output_error(self):
inputs = input_layer_lib.Input((10,))
outputs = layers.Dense(10)(inputs)
with self.assertRaisesRegex(
TypeError, "('Keyword argument not understood:', 'output')"):
models.Model(inputs=inputs, output=outputs)
def test_input_spec(self):
if not context.executing_eagerly():
return
inputs = input_layer_lib.Input((10,))
outputs = layers.Dense(10)(inputs)
model = models.Model(inputs, outputs)
with self.assertRaisesRegex(
ValueError, r'.*expected shape=.*'):
model(np.zeros((3, 11)))
def test_input_spec_list_of_inputs(self):
if not context.executing_eagerly():
return
input_1 = input_layer_lib.Input((10,), name='1')
input_2 = input_layer_lib.Input((5,), name='2')
x = layers.Concatenate()([input_1, input_2])
outputs = layers.Dense(10)(x)
model = models.Model([input_1, input_2], outputs)
with self.assertRaisesRegex(
ValueError, r'.*expects 2 input.*'):
model(np.zeros((3, 10)))
with self.assertRaisesRegex(
ValueError, r'.*expects 2 input.*'):
model([np.zeros((3, 10)), np.zeros((3, 5)), np.zeros((3, 10))])
with self.assertRaisesRegex(
ValueError, r'.*expected shape=.*'):
model([np.zeros((3, 10)), np.zeros((3, 6))])
# Test passing data via dict keyed by input name
with self.assertRaisesRegex(
ValueError, r'Missing data for input.*'):
model({'1': np.zeros((3, 10))})
with self.assertRaisesRegex(
ValueError, r'.*expected shape=.*'):
model({'1': np.zeros((3, 10)), '2': np.zeros((3, 6))})
def test_input_spec_dict(self):
if not context.executing_eagerly():
return
input_1 = input_layer_lib.Input((10,))
input_2 = input_layer_lib.Input((5,))
x = layers.Concatenate()([input_1, input_2])
outputs = layers.Dense(10)(x)
model = models.Model({'1': input_1, '2': input_2}, outputs)
with self.assertRaisesRegex(
ValueError, r'Missing data for input.*'):
model({'1': np.zeros((3, 10))})
with self.assertRaisesRegex(
ValueError, r'.*expected shape=.*'):
model({'1': np.zeros((3, 10)), '2': np.zeros((3, 6))})
class FunctionalSubclassModel(training_lib.Model):
def __init__(self, *args, **kwargs):
my_input = input_layer_lib.Input(shape=(16,))
dense = layers.Dense(32, activation='relu')
output = dense(my_input)
outputs = {'output': output}
super().__init__(inputs=[my_input], outputs=outputs, *args, **kwargs)
class MixinClass(object):
def __init__(self, foo, **kwargs):
self._foo = foo
super().__init__(**kwargs)
def get_foo(self):
return self._foo
class SubclassedModel(training_lib.Model):
def __init__(self, bar, **kwargs):
self._bar = bar
super().__init__(**kwargs)
def get_bar(self):
return self._bar
class MultipleInheritanceModelTest(keras_parameterized.TestCase):
def testFunctionalSubclass(self):
m = FunctionalSubclassModel()
    # Smoke test for the weights and output shape of the model.
self.assertLen(m.weights, 2)
self.assertEqual(m.outputs[0].shape.as_list(), [None, 32])
def testFunctionalSubclassPreMixin(self):
class MixedFunctionalSubclassModel(MixinClass, FunctionalSubclassModel):
pass
m = MixedFunctionalSubclassModel(foo='123')
self.assertTrue(m._is_graph_network)
self.assertLen(m.weights, 2)
self.assertEqual(m.outputs[0].shape.as_list(), [None, 32])
self.assertEqual(m.get_foo(), '123')
def testFunctionalSubclassPostMixin(self):
    # Make sure the mixin class is also initialized correctly when the order is changed.
class MixedFunctionalSubclassModel(FunctionalSubclassModel, MixinClass):
pass
m = MixedFunctionalSubclassModel(foo='123')
self.assertTrue(m._is_graph_network)
self.assertLen(m.weights, 2)
self.assertEqual(m.outputs[0].shape.as_list(), [None, 32])
self.assertEqual(m.get_foo(), '123')
def testSubclassModelPreMixin(self):
class MixedSubclassModel(MixinClass, SubclassedModel):
pass
m = MixedSubclassModel(foo='123', bar='456')
self.assertFalse(m._is_graph_network)
self.assertEqual(m.get_foo(), '123')
self.assertEqual(m.get_bar(), '456')
if __name__ == '__main__':
test.main()
|
{
"content_hash": "c104ee9fb4ac0b0fb8be96332d51beb5",
"timestamp": "",
"source": "github",
"line_count": 2533,
"max_line_length": 80,
"avg_line_length": 35.895380971180415,
"alnum_prop": 0.6373964783388142,
"repo_name": "freedomtan/tensorflow",
"id": "fea3ee16da7441e1d487a253f06d03fbe2ff316e",
"size": "91610",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/engine/functional_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32479"
},
{
"name": "Batchfile",
"bytes": "38366"
},
{
"name": "C",
"bytes": "1035837"
},
{
"name": "C#",
"bytes": "13395"
},
{
"name": "C++",
"bytes": "99324075"
},
{
"name": "CMake",
"bytes": "107781"
},
{
"name": "Dockerfile",
"bytes": "283435"
},
{
"name": "Go",
"bytes": "2013128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "928595"
},
{
"name": "Jupyter Notebook",
"bytes": "981916"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "4489624"
},
{
"name": "Makefile",
"bytes": "97500"
},
{
"name": "NASL",
"bytes": "8048"
},
{
"name": "Objective-C",
"bytes": "141623"
},
{
"name": "Objective-C++",
"bytes": "360423"
},
{
"name": "PHP",
"bytes": "20570"
},
{
"name": "Pawn",
"bytes": "32277"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42762396"
},
{
"name": "RobotFramework",
"bytes": "2661"
},
{
"name": "Roff",
"bytes": "2515"
},
{
"name": "Ruby",
"bytes": "6723"
},
{
"name": "Shell",
"bytes": "647623"
},
{
"name": "Smarty",
"bytes": "52687"
},
{
"name": "Starlark",
"bytes": "4632847"
},
{
"name": "Swift",
"bytes": "56924"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from AlgorithmImports import *
from clr import GetClrType as typeof
from Selection.UniverseSelectionModel import UniverseSelectionModel
class FutureUniverseSelectionModel(UniverseSelectionModel):
    '''Provides an implementation of IUniverseSelectionModel that subscribes to future chains'''
def __init__(self,
refreshInterval,
futureChainSymbolSelector,
universeSettings = None):
'''Creates a new instance of FutureUniverseSelectionModel
Args:
            refreshInterval: Time interval between universe refreshes
futureChainSymbolSelector: Selects symbols from the provided future chain
universeSettings: Universe settings define attributes of created subscriptions, such as their resolution and the minimum time in universe before they can be removed'''
self.nextRefreshTimeUtc = datetime.min
self.refreshInterval = refreshInterval
self.futureChainSymbolSelector = futureChainSymbolSelector
self.universeSettings = universeSettings
def GetNextRefreshTimeUtc(self):
'''Gets the next time the framework should invoke the `CreateUniverses` method to refresh the set of universes.'''
return self.nextRefreshTimeUtc
def CreateUniverses(self, algorithm):
        '''Creates the future chain universes using this class's selection functions
        Args:
            algorithm: The algorithm instance to create universes for
        Returns:
            The universes defined by this model'''
self.nextRefreshTimeUtc = algorithm.UtcTime + self.refreshInterval
uniqueSymbols = set()
for futureSymbol in self.futureChainSymbolSelector(algorithm.UtcTime):
if futureSymbol.SecurityType != SecurityType.Future:
raise ValueError("futureChainSymbolSelector must return future symbols.")
# prevent creating duplicate future chains -- one per symbol
if futureSymbol not in uniqueSymbols:
uniqueSymbols.add(futureSymbol)
yield self.CreateFutureChain(algorithm, futureSymbol)
def CreateFutureChain(self, algorithm, symbol):
'''Creates a FuturesChainUniverse for a given symbol
Args:
algorithm: The algorithm instance to create universes for
symbol: Symbol of the future
Returns:
FuturesChainUniverse for the given symbol'''
if symbol.SecurityType != SecurityType.Future:
raise ValueError("CreateFutureChain requires an future symbol.")
# rewrite non-canonical symbols to be canonical
market = symbol.ID.Market
if not symbol.IsCanonical():
symbol = Symbol.Create(symbol.Value, SecurityType.Future, market, f"/{symbol.Value}")
# resolve defaults if not specified
settings = self.universeSettings if self.universeSettings is not None else algorithm.UniverseSettings
# create canonical security object, but don't duplicate if it already exists
securities = [s for s in algorithm.Securities if s.Key == symbol]
if len(securities) == 0:
futureChain = self.CreateFutureChainSecurity(algorithm, symbol, settings)
else:
futureChain = securities[0]
# set the future chain contract filter function
futureChain.SetFilter(self.Filter)
# force future chain security to not be directly tradable AFTER it's configured to ensure it's not overwritten
futureChain.IsTradable = False
return FuturesChainUniverse(futureChain, settings)
def CreateFutureChainSecurity(self, algorithm, symbol, settings):
'''Creates the canonical Future chain security for a given symbol
Args:
algorithm: The algorithm instance to create universes for
symbol: Symbol of the future
settings: Universe settings define attributes of created subscriptions, such as their resolution and the minimum time in universe before they can be removed
        Returns:
Future for the given symbol'''
config = algorithm.SubscriptionManager.SubscriptionDataConfigService.Add(typeof(ZipEntryName),
symbol,
settings.Resolution,
settings.FillForward,
settings.ExtendedMarketHours,
False)
return algorithm.Securities.CreateSecurity(symbol, config, settings.Leverage, False)
def Filter(self, filter):
'''Defines the future chain universe filter'''
# NOP
return filter
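# A hedged usage sketch (not part of this model; symbol and refresh interval
# below are illustrative): inside an algorithm's Initialize one might wire the
# model up roughly as follows.
#
#   self.SetUniverseSelection(FutureUniverseSelectionModel(
#       timedelta(days=1),
#       lambda utc_time: [Symbol.Create(Futures.Indices.SP500EMini,
#                                       SecurityType.Future, Market.CME)]))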
|
{
"content_hash": "c4d18ccce8ffa06298600ff78d15ad09",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 179,
"avg_line_length": 50.7319587628866,
"alnum_prop": 0.6433651696809591,
"repo_name": "jameschch/Lean",
"id": "6f53dbbb21ca37c2a58b9133258a3a437f8d617f",
"size": "5609",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Algorithm.Framework/Selection/FutureUniverseSelectionModel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2540"
},
{
"name": "C#",
"bytes": "15402085"
},
{
"name": "Dockerfile",
"bytes": "1226"
},
{
"name": "F#",
"bytes": "1723"
},
{
"name": "HTML",
"bytes": "2607907"
},
{
"name": "Java",
"bytes": "852"
},
{
"name": "Jupyter Notebook",
"bytes": "16348"
},
{
"name": "Python",
"bytes": "654580"
},
{
"name": "Shell",
"bytes": "2307"
},
{
"name": "Visual Basic",
"bytes": "2448"
}
],
"symlink_target": ""
}
|
from .base import make_agg_primitive, make_trans_primitive
from .standard import *
from .utils import (
get_aggregation_primitives,
get_default_aggregation_primitives,
get_default_transform_primitives,
get_transform_primitives,
list_primitives
)
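# A hedged usage sketch (assuming the package re-exports this api module):
# the helpers above can be used to inspect the primitive catalog, e.g.
#
#   from featuretools.primitives.api import list_primitives
#   list_primitives()  # tabular listing of the available primitives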
|
{
"content_hash": "9bffadcd54063743009884f6b9e45fb7",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 58,
"avg_line_length": 29.555555555555557,
"alnum_prop": 0.7556390977443609,
"repo_name": "Featuretools/featuretools",
"id": "08217a8f1a5a49f5663c737c792b6c6f1446f48c",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/latest-dep-update-03d11f0",
"path": "featuretools/primitives/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3340"
},
{
"name": "Makefile",
"bytes": "736"
},
{
"name": "Python",
"bytes": "921333"
},
{
"name": "Shell",
"bytes": "511"
}
],
"symlink_target": ""
}
|
import pytest
from os.path import join, realpath, dirname
@pytest.mark.torch
def test_featurize():
"""Test that BertFeaturizer.featurize() correctly featurizes all sequences,
correctly outputs input_ids and attention_mask."""
from deepchem.feat.bert_tokenizer import BertFeaturizer
from transformers import BertTokenizerFast
sequences = [
'[CLS] D L I P T S S K L V [SEP]', '[CLS] V K K A F F A L V T [SEP]'
]
sequence_long = ['[CLS] D L I P T S S K L V V K K A F F A L V T [SEP]']
tokenizer = BertTokenizerFast.from_pretrained(
"Rostlab/prot_bert", do_lower_case=False)
featurizer = BertFeaturizer(tokenizer)
feats = featurizer(sequences)
long_feat = featurizer(sequence_long)
assert (len(feats) == 2)
assert (all([len(f) == 3 for f in feats]))
assert (len(long_feat) == 1)
  # The fixed assertion checks the three token arrays (input_ids,
  # token_type_ids, attention_mask), matching the per-sequence check above.
  assert (len(long_feat[0]) == 3)
@pytest.mark.torch
def test_loading():
"""Test that the FASTA loader can load with this featurizer."""
from transformers import BertTokenizerFast
from deepchem.feat.bert_tokenizer import BertFeaturizer
from deepchem.data.data_loader import FASTALoader
tokenizer = BertTokenizerFast.from_pretrained(
"Rostlab/prot_bert", do_lower_case=False)
featurizer = BertFeaturizer(tokenizer)
loader = FASTALoader(
featurizer=featurizer, legacy=False, auto_add_annotations=True)
file_loc = realpath(__file__)
directory = dirname(file_loc)
data = loader.create_dataset(
input_files=join(directory, "data/uniprot_truncated.fasta"))
assert data.X.shape == (61, 3, 5)
|
{
"content_hash": "f8e3307d0a5048df8503e7f2a4f78c13",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 77,
"avg_line_length": 35.15909090909091,
"alnum_prop": 0.7084680025856497,
"repo_name": "peastman/deepchem",
"id": "af00408c1fe6abd0e8e6337068d77e4a5f841bfa",
"size": "1547",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deepchem/feat/tests/test_bert_tokenizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "6616"
},
{
"name": "Jupyter Notebook",
"bytes": "59756"
},
{
"name": "PowerShell",
"bytes": "1504"
},
{
"name": "Python",
"bytes": "3769760"
},
{
"name": "Shell",
"bytes": "5145"
}
],
"symlink_target": ""
}
|
from django.apps import apps
from django.contrib.contenttypes.models import ContentType
from django.core.management import BaseCommand
from django.db import DEFAULT_DB_ALIAS, router
from django.db.models.deletion import Collector
from ...management import get_contenttypes_and_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--database', action='store', dest='database', default=DEFAULT_DB_ALIAS,
help='Nominates the database to use. Defaults to the "default" database.',
)
def handle(self, **options):
db = options['database']
interactive = options['interactive']
verbosity = options['verbosity']
for app_config in apps.get_app_configs():
content_types, app_models = get_contenttypes_and_models(app_config, db, ContentType)
if not app_models:
continue
to_remove = [
ct for (model_name, ct) in content_types.items()
if model_name not in app_models
]
# Confirm that the content type is stale before deletion.
using = router.db_for_write(ContentType)
if to_remove:
if interactive:
ct_info = []
for ct in to_remove:
ct_info.append(' - Content type for %s.%s' % (ct.app_label, ct.model))
collector = NoFastDeleteCollector(using=using)
collector.collect([ct])
for obj_type, objs in collector.data.items():
if objs == {ct}:
continue
ct_info.append(' - %s %s object(s)' % (
len(objs),
obj_type._meta.label,
))
content_type_display = '\n'.join(ct_info)
self.stdout.write("""Some content types in your database are stale and can be deleted.
Any objects that depend on these content types will also be deleted.
The content types and dependent objects that would be deleted are:
%s
This list doesn't include any cascade deletions to data outside of Django's
models (uncommon).
Are you sure you want to delete these content types?
If you're unsure, answer 'no'.\n""" % content_type_display)
ok_to_delete = input("Type 'yes' to continue, or 'no' to cancel: ")
else:
ok_to_delete = False
if ok_to_delete == 'yes':
for ct in to_remove:
if verbosity >= 2:
self.stdout.write("Deleting stale content type '%s | %s'" % (ct.app_label, ct.model))
ct.delete()
else:
if verbosity >= 2:
self.stdout.write("Stale content types remain.")
class NoFastDeleteCollector(Collector):
def can_fast_delete(self, *args, **kwargs):
"""
Always load related objects to display them when showing confirmation.
"""
return False
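# A hedged usage sketch (invocation, not part of this module): the command is
# run through manage.py, e.g.
#
#   python manage.py remove_stale_contenttypes --database=default --no-input
#
# With --no-input the interactive prompt above is skipped, ok_to_delete stays
# False, and stale content types are reported but not deleted.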
|
{
"content_hash": "06e84c336f587f098b128c9670e6bc93",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 113,
"avg_line_length": 40.785714285714285,
"alnum_prop": 0.5437828371278459,
"repo_name": "MoritzS/django",
"id": "e5f77dc7dfdde63353e3c1510401721b92eb9c5e",
"size": "3426",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "django/contrib/contenttypes/management/commands/remove_stale_contenttypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52508"
},
{
"name": "HTML",
"bytes": "173554"
},
{
"name": "JavaScript",
"bytes": "452093"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12036628"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import myhdl
from myhdl import (Signal, intbv, always_seq, always_comb, concat)
from rhea.build.boards import get_board
@myhdl.block
def zybo_blink(led, btn, clock):
maxcnt = int(clock.frequency)
cnt = Signal(intbv(0, min=0, max=maxcnt))
toggle = Signal(bool(0))
@always_seq(clock.posedge, reset=None)
def rtl():
if cnt == maxcnt-1:
toggle.next = not toggle
cnt.next = 0
else:
cnt.next = cnt + 1
@always_comb
def rtl_assign():
if btn:
led.next = btn
else:
led.next = concat("000", toggle)
return rtl, rtl_assign
def build():
brd = get_board('zybo')
flow = brd.get_flow(zybo_blink)
flow.run()
def main():
build()
if __name__ == '__main__':
main()
|
{
"content_hash": "5c0ad46cadcb0261171be784d1c53f59",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 66,
"avg_line_length": 19.19047619047619,
"alnum_prop": 0.5533498759305211,
"repo_name": "NickShaffner/rhea",
"id": "94ca6acf62ce82b379f8945a1fe7683faa52839c",
"size": "807",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/boards/zybo/blinky.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2185"
},
{
"name": "Python",
"bytes": "671881"
},
{
"name": "Shell",
"bytes": "1590"
},
{
"name": "VHDL",
"bytes": "10452"
},
{
"name": "Verilog",
"bytes": "22193"
}
],
"symlink_target": ""
}
|
"""AFF4 RDFValue implementations.
This module contains all RDFValue implementations.
NOTE: This module uses the class registry to contain all implementations of
the RDFValue class, regardless of where they are defined. To do this reliably, these
implementations must be imported _before_ the relevant classes are referenced
from this module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
import binascii
import calendar
import collections
import datetime
import functools
import logging
import posixpath
import re
import time
import zlib
import dateutil
from dateutil import parser
from future.builtins import filter
from future.builtins import int
from future.builtins import str
from future.utils import iteritems
from future.utils import python_2_unicode_compatible
from future.utils import string_types
from future.utils import with_metaclass
from typing import cast, Any, Text, Union
from grr_response_core.lib import registry
from grr_response_core.lib import utils
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_core.lib.util import random
# Somewhere to keep all the late binding placeholders.
_LATE_BINDING_STORE = {}
def RegisterLateBindingCallback(target_name, callback, **kwargs):
"""Registers a callback to be invoked when the RDFValue named is declared."""
_LATE_BINDING_STORE.setdefault(target_name, []).append((callback, kwargs))
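# A minimal sketch of the late-binding flow (names here are hypothetical): a
# module that needs an RDFValue subclass before it is declared registers a
# callback; RDFValueMetaclass.__init__ below pops and fires it, passing the
# newly declared class as `target`, once the class is finally defined.
#
#   def _attach(target, field_name):
#     SomeContainer.AddField(field_name, target)
#
#   RegisterLateBindingCallback("LateValue", _attach, field_name="late")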
class Error(Exception):
"""Errors generated by RDFValue parsers."""
class InitializeError(Error):
"""Raised when we can not initialize from this parameter."""
class DecodeError(InitializeError, ValueError):
"""Generated when we can not decode the data."""
def __init__(self, msg):
logging.debug(msg)
super(DecodeError, self).__init__(msg)
class RDFValueMetaclass(registry.MetaclassRegistry):
"""A metaclass for managing semantic values."""
def __init__(cls, name, bases, env_dict): # pylint: disable=no-self-argument
super(RDFValueMetaclass, cls).__init__(name, bases, env_dict)
# Run and clear any late binding callbacks registered for this class.
for callback, kwargs in _LATE_BINDING_STORE.pop(name, []):
callback(target=cls, **kwargs)
class HashUnsupportedError(Exception):
"""Raised in __hash__ of a class that will soon be unhashable."""
def __init__(self, cls):
precondition.AssertType(cls, type)
super(HashUnsupportedError,
self).__init__("Hashing {} is unsupported!".format(
compatibility.GetName(cls)))
@python_2_unicode_compatible
class RDFValue(with_metaclass(RDFValueMetaclass, object)):
"""Baseclass for values.
RDFValues are serialized to and from the data store.
"""
# This is how the attribute will be serialized to the data store. It must
# indicate both the type emitted by SerializeToDataStore() and expected by
# FromDatastoreValue()
data_store_type = "bytes"
# URL pointing to a help page about this value type.
context_help_url = None
_value = None
_prev_hash = None
# Mark as dirty each time we modify this object.
dirty = False
# If this value was created as part of an AFF4 attribute, the attribute is
# assigned here.
attribute_instance = None
def __init__(self, initializer=None):
"""Constructor must be able to take no args.
Args:
initializer: Optional parameter to construct from.
Raises:
InitializeError: if we can not be initialized from this parameter.
"""
    # Allow an RDFValue to be initialized from an identical RDFValue.
# TODO(user):pytype: type checker can't infer that the initializer
# is not None after the check below.
if initializer.__class__ == self.__class__:
self.ParseFromBytes(cast(self.__class__, initializer).SerializeToBytes())
self._prev_hash = None
def Copy(self):
"""Make a new copy of this RDFValue."""
res = self.__class__() # pytype: disable=not-instantiable
res.ParseFromBytes(self.SerializeToBytes())
return res
def SetRaw(self, value):
self._value = value
def __copy__(self):
return self.Copy()
@abc.abstractmethod
def ParseFromBytes(self, string):
"""Given a string, parse ourselves from it."""
pass
@abc.abstractmethod
def ParseFromDatastore(self, value):
"""Initialize the RDF object from the datastore value."""
pass
@classmethod
def FromDatastoreValue(cls, value):
res = cls()
res.ParseFromDatastore(value)
return res
@classmethod
def FromSerializedBytes(cls, value):
res = cls()
res.ParseFromBytes(value)
return res
# TODO: Remove legacy SerializeToDataStore.
def SerializeToDataStore(self):
"""Serialize to a datastore compatible form."""
return self.SerializeToBytes()
@abc.abstractmethod
def SerializeToBytes(self):
"""Serialize into a string which can be parsed using ParseFromBytes."""
@classmethod
def Fields(cls):
"""Return a list of fields which can be queried from this value."""
return []
def __eq__(self, other):
return self._value == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
new_hash = hash(self.SerializeToBytes())
if self._prev_hash is not None and new_hash != self._prev_hash:
raise AssertionError(
"Usage of {} violates Python data model: hash() has changed! Usage "
"of RDFStructs as members of sets or keys of dicts is discouraged. "
"If used anyway, mutating is prohibited, because it causes the hash "
"to change. Be aware that accessing unset fields can trigger a "
"mutation.".format(compatibility.GetName(type(self))))
else:
self._prev_hash = new_hash
return new_hash
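  # Illustrative failure mode (hypothetical struct type): once hash() has been
  # computed, any mutation that changes SerializeToBytes() makes the next
  # hash() call raise the AssertionError above.
  #
  #   v = SomeRDFStruct(field=1)
  #   d = {v: "cached"}   # hash() computed and remembered in _prev_hash
  #   v.field = 2         # serialized form changes
  #   hash(v)             # raises AssertionError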
def __bool__(self):
return bool(self._value)
# TODO: Remove after support for Python 2 is dropped.
__nonzero__ = __bool__
def __str__(self): # pylint: disable=super-on-old-class
"""Ignores the __repr__ override below to avoid indefinite recursion."""
return super(RDFValue, self).__repr__()
def __repr__(self):
content = str(self)
# Note %r, which prevents nasty nonascii characters from being printed,
# including dangerous terminal escape sequences.
return "<%s(%r)>" % (compatibility.GetName(self.__class__), content)
class RDFPrimitive(RDFValue):
@classmethod
def FromHumanReadable(cls, string):
instance = cls()
instance.ParseFromHumanReadable(string)
return instance
@abc.abstractmethod
def ParseFromHumanReadable(self, string):
"""Initializes the object from human-readable string.
Args:
string: An `unicode` value to initialize the object from.
"""
@python_2_unicode_compatible
class RDFBytes(RDFPrimitive):
"""An attribute which holds bytes."""
data_store_type = "bytes"
def __init__(self, initializer=None):
super(RDFBytes, self).__init__()
if initializer is None:
self._value = b""
elif isinstance(initializer, bytes):
self._value = initializer
elif isinstance(initializer, RDFBytes):
self._value = initializer.AsBytes()
else:
message = "Unexpected initializer `{value}` of type {type}"
raise TypeError(message.format(value=initializer, type=type(initializer)))
def ParseFromBytes(self, string):
precondition.AssertType(string, bytes)
self._value = string
def ParseFromDatastore(self, value):
precondition.AssertType(value, bytes)
self._value = value
def ParseFromHumanReadable(self, string):
precondition.AssertType(string, Text)
self._value = string.encode("utf-8")
def AsBytes(self):
return self._value
def SerializeToBytes(self):
return self._value
def __str__(self):
return binascii.hexlify(self._value).decode("ascii")
def __hash__(self):
return hash(self._value)
def __lt__(self, other):
if isinstance(other, self.__class__):
return self._value < other._value # pylint: disable=protected-access
else:
return self._value < other
def __gt__(self, other):
if isinstance(other, self.__class__):
return self._value > other._value # pylint: disable=protected-access
else:
return self._value > other
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._value == other._value # pylint: disable=protected-access
else:
return self._value == other
def __len__(self):
return len(self._value)
class RDFZippedBytes(RDFBytes):
"""Zipped bytes sequence."""
def Uncompress(self):
if self:
return zlib.decompress(self._value)
else:
return b""
@functools.total_ordering
@python_2_unicode_compatible
class RDFString(RDFPrimitive):
"""Represent a simple string."""
data_store_type = "string"
_value = u""
# TODO(hanuszczak): Allow initializing from arbitrary `unicode`-able object.
def __init__(self, initializer=None):
super(RDFString, self).__init__(initializer=None)
if isinstance(initializer, RDFString):
self._value = initializer._value # pylint: disable=protected-access
elif isinstance(initializer, bytes):
self.ParseFromBytes(initializer)
elif isinstance(initializer, Text):
self._value = initializer
elif initializer is not None:
message = "Unexpected initializer `%s` of type `%s`"
message %= (initializer, type(initializer))
raise TypeError(message)
def format(self, *args, **kwargs): # pylint: disable=invalid-name
return self._value.format(*args, **kwargs)
def split(self, *args, **kwargs): # pylint: disable=invalid-name
return self._value.split(*args, **kwargs)
def __str__(self):
return self._value
def __hash__(self):
return hash(self._value)
def __getitem__(self, item):
return self._value.__getitem__(item)
def __len__(self):
return len(self._value)
def __eq__(self, other):
if isinstance(other, RDFString):
return self._value == other._value # pylint: disable=protected-access
if isinstance(other, Text):
return self._value == other
# TODO(hanuszczak): Comparing `RDFString` and `bytes` should result in type
# error. For now we allow it because too many tests still use non-unicode
# string literals.
if isinstance(other, bytes):
return self._value.encode("utf-8") == other
return NotImplemented
def __lt__(self, other):
if isinstance(other, RDFString):
return self._value < other._value # pylint: disable=protected-access
if isinstance(other, Text):
return self._value < other
# TODO(hanuszczak): Comparing `RDFString` and `bytes` should result in type
# error. For now we allow it because too many tests still use non-unicode
# string literals.
if isinstance(other, bytes):
return self._value.encode("utf-8") < other
return NotImplemented
def ParseFromBytes(self, string):
precondition.AssertType(string, bytes)
self._value = string.decode("utf-8")
def ParseFromDatastore(self, value):
precondition.AssertType(value, Text)
self._value = value
def ParseFromHumanReadable(self, string):
precondition.AssertType(string, Text)
self._value = string
def SerializeToBytes(self):
return self._value.encode("utf-8")
def SerializeToDataStore(self):
return self._value
# TODO(hanuszczak): This class should provide custom method for parsing from
# human readable strings (and arguably should not derive from `RDFBytes` at
# all).
@python_2_unicode_compatible
class HashDigest(RDFBytes):
"""Binary hash digest with hex string representation."""
data_store_type = "bytes"
def HexDigest(self):
return binascii.hexlify(self._value).decode("ascii")
def __str__(self):
return self.HexDigest()
def __hash__(self):
return hash(self._value)
# TODO(hanuszczak): This is a terrible equality definition.
def __eq__(self, other):
if isinstance(other, HashDigest):
return self._value == other._value # pylint: disable=protected-access
if isinstance(other, bytes):
return self._value == other
if isinstance(other, Text):
return str(self) == other
return NotImplemented
def __ne__(self, other):
return not self == other
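# The (admittedly loose) equality above in practice (sketch): a digest
# compares equal to both its raw bytes and its hex string representation.
#   >>> digest = HashDigest(b"\x00\xff")
#   >>> digest == b"\x00\xff" and digest == u"00ff"
#   True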
@functools.total_ordering
@python_2_unicode_compatible
class RDFInteger(RDFPrimitive):
"""Represent an integer."""
data_store_type = "integer"
@staticmethod
def IsNumeric(value):
return isinstance(value, (int, float, RDFInteger))
def __init__(self, initializer=None):
super(RDFInteger, self).__init__(initializer=initializer)
if self._value is None:
if initializer is None:
self._value = 0
else:
self._value = compatibility.builtins.int(initializer)
def SerializeToBytes(self):
return str(self._value).encode("ascii")
def ParseFromBytes(self, string):
precondition.AssertType(string, bytes)
self._value = 0
if string:
try:
self._value = compatibility.builtins.int(string)
      except (TypeError, ValueError) as e:
raise DecodeError(e)
def ParseFromDatastore(self, value):
precondition.AssertType(value, int)
self._value = compatibility.builtins.int(value)
def ParseFromHumanReadable(self, string):
precondition.AssertType(string, Text)
self._value = compatibility.builtins.int(string)
def __str__(self):
return str(self._value)
@classmethod
def FromDatastoreValue(cls, value):
return cls(initializer=value)
def SerializeToDataStore(self):
"""Use varint to store the integer."""
return self._value
def __long__(self):
return self._value
def __int__(self):
return self._value
def __float__(self):
return float(self._value)
def __index__(self):
return self._value
def __lt__(self, other):
return self._value < other
def __and__(self, other):
return self._value & other
def __rand__(self, other):
return self._value & other
def __or__(self, other):
return self._value | other
def __ror__(self, other):
return self._value | other
def __add__(self, other):
return self._value + other
def __radd__(self, other):
return self._value + other
def __sub__(self, other):
return self._value - other
def __rsub__(self, other):
return other - self._value
def __mul__(self, other):
return self._value * other
# TODO: There are no `__rop__` methods in Python 3 so all of
# these should be removed. Also, in general it should not be possible to add
# two values with incompatible types (e.g. `RDFInteger` and `int`). Sadly,
# currently a lot of code depends on this behaviour but it should be changed
# in the future.
def __rmul__(self, other):
return self._value * other
def __div__(self, other):
return self._value.__div__(other)
def __truediv__(self, other):
return self._value.__truediv__(other)
def __floordiv__(self, other):
return self._value.__floordiv__(other)
def __hash__(self):
return hash(self._value)
class RDFBool(RDFInteger):
"""Boolean value."""
data_store_type = "unsigned_integer"
def ParseFromHumanReadable(self, string):
precondition.AssertType(string, Text)
upper_string = string.upper()
    if upper_string == u"TRUE" or upper_string == u"1":
      self._value = 1
    elif upper_string == u"FALSE" or upper_string == u"0":
      self._value = 0
else:
raise ValueError("Unparsable boolean string: `%s`" % string)
@python_2_unicode_compatible
class RDFDatetime(RDFInteger):
"""A date and time internally stored in MICROSECONDS."""
converter = 1000000
data_store_type = "unsigned_integer"
def __init__(self, initializer=None):
super(RDFDatetime, self).__init__(None)
self._value = 0
if initializer is None:
return
# TODO(hanuszczak): Disallow `float` initialization.
if isinstance(initializer, (RDFInteger, int, float)):
self._value = compatibility.builtins.int(initializer)
else:
raise InitializeError("Unknown initializer for RDFDateTime: %s." %
type(initializer))
@classmethod
def Now(cls):
return cls(int(time.time() * cls.converter))
def Format(self, fmt):
"""Return the value as a string formatted as per strftime semantics."""
precondition.AssertType(fmt, Text)
stime = time.gmtime(self._value / self.converter)
return compatibility.FormatTime(fmt, stime)
def __str__(self):
"""Return the date in human readable (UTC)."""
# TODO: Display microseconds if applicable.
return self.Format("%Y-%m-%d %H:%M:%S")
def AsDatetime(self):
"""Return the time as a python datetime object."""
return datetime.datetime.utcfromtimestamp(self._value / self.converter)
def AsSecondsSinceEpoch(self):
return self._value // self.converter
def AsMicrosecondsSinceEpoch(self):
return self._value
@classmethod
def FromSecondsSinceEpoch(cls, value):
# Convert to int in case we get fractional seconds with higher
# resolution than what this class supports.
return cls(int(value * cls.converter))
@classmethod
def FromMicrosecondsSinceEpoch(cls, value):
precondition.AssertType(value, int)
return cls(value)
@classmethod
def FromDatetime(cls, value):
seconds = calendar.timegm(value.utctimetuple())
return cls(seconds * cls.converter + value.microsecond)
@classmethod
def FromDate(cls, value):
seconds = calendar.timegm(value.timetuple())
return cls(seconds * cls.converter)
@classmethod
def FromHumanReadable(cls, value, eoy=False):
res = cls()
res.ParseFromHumanReadable(value, eoy=eoy)
return res
@classmethod
def Lerp(cls, t, start_time, end_time):
"""Interpolates linearly between two datetime values.
Args:
t: An interpolation "progress" value.
start_time: A value for t = 0.
end_time: A value for t = 1.
Returns:
An interpolated `RDFDatetime` instance.
Raises:
TypeError: If given time values are not instances of `RDFDatetime`.
ValueError: If `t` parameter is not between 0 and 1.
"""
if not (isinstance(start_time, RDFDatetime) and
isinstance(end_time, RDFDatetime)):
raise TypeError("Interpolation of non-datetime values")
if not 0.0 <= t <= 1.0:
raise ValueError("Interpolation progress does not belong to [0.0, 1.0]")
return cls(round((1 - t) * start_time._value + t * end_time._value)) # pylint: disable=protected-access
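  # A sketch of Lerp at the midpoint (t = 0.5) between two timestamps:
  #   >>> start = RDFDatetime.FromSecondsSinceEpoch(0)
  #   >>> end = RDFDatetime.FromSecondsSinceEpoch(100)
  #   >>> str(RDFDatetime.Lerp(0.5, start_time=start, end_time=end))
  #   '1970-01-01 00:00:50'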
def ParseFromHumanReadable(self, string, eoy=False):
# TODO(hanuszczak): This method should accept only unicode literals.
self._value = self._ParseFromHumanReadable(string, eoy=eoy)
def __add__(self, other):
# TODO(hanuszczak): Disallow `float` initialization.
if isinstance(other, (int, float)):
# Assume other is in seconds
other_microseconds = compatibility.builtins.int(other * self.converter)
return self.__class__(self._value + other_microseconds)
elif isinstance(other, (DurationSeconds, Duration)):
self_us = self.AsMicrosecondsSinceEpoch()
duration_us = other.microseconds
return self.__class__.FromMicrosecondsSinceEpoch(self_us + duration_us)
return NotImplemented
def __mul__(self, other):
# TODO(hanuszczak): Disallow `float` initialization.
if isinstance(other, (int, float, DurationSeconds)):
return self.__class__(self._value * other)
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __sub__(self, other):
# TODO(hanuszczak): Disallow `float` initialization.
if isinstance(other, (int, float)):
# Assume other is in seconds
other_microseconds = compatibility.builtins.int(other * self.converter)
return self.__class__(self._value - other_microseconds)
elif isinstance(other, (DurationSeconds, Duration)):
self_us = self.AsMicrosecondsSinceEpoch()
duration_us = other.microseconds
return self.__class__.FromMicrosecondsSinceEpoch(self_us - duration_us)
elif isinstance(other, RDFDatetime):
diff_us = (
self.AsMicrosecondsSinceEpoch() - other.AsMicrosecondsSinceEpoch())
return Duration.From(diff_us, MICROSECONDS)
return NotImplemented
@classmethod
def _ParseFromHumanReadable(cls, string, eoy=False):
"""Parse a human readable string of a timestamp (in local time).
Args:
string: The string to parse.
eoy: If True, sets the default value to the end of the year. Usually this
method returns a timestamp where each field that is not present in the
given string is filled with values from the date January 1st of the
current year, midnight. Sometimes it makes more sense to compare against
the end of a period so if eoy is set, the default values are copied from
the 31st of December of the current
year, 23:59h.
Returns:
The parsed timestamp.
"""
# TODO(hanuszczak): Date can come either as a single integer (which we
# interpret as a timestamp) or as a really human readable thing such as
# '2000-01-01 13:37'. This is less than ideal (since timestamps are not
# really "human readable") and should be fixed in the future.
try:
return compatibility.builtins.int(string)
except ValueError:
pass
# By default assume the time is given in UTC.
# pylint: disable=g-tzinfo-datetime
if eoy:
default = datetime.datetime(
time.gmtime().tm_year, 12, 31, 23, 59, tzinfo=dateutil.tz.tzutc())
else:
default = datetime.datetime(
time.gmtime().tm_year, 1, 1, 0, 0, tzinfo=dateutil.tz.tzutc())
# pylint: enable=g-tzinfo-datetime
timestamp = parser.parse(string, default=default)
return calendar.timegm(timestamp.utctimetuple()) * cls.converter
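  # Examples of the parsing rules above (a sketch; plain integers are taken as
  # raw timestamps, anything else goes through dateutil with the defaults
  # described in the docstring):
  #   >>> str(RDFDatetime.FromHumanReadable(u"2000-03-15 13:37"))
  #   '2000-03-15 13:37:00'
  #   >>> str(RDFDatetime.FromHumanReadable(u"2000-03", eoy=True))
  #   '2000-03-31 23:59:00'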
def Floor(self, interval):
precondition.AssertType(interval, Duration)
seconds = self.AsSecondsSinceEpoch() // interval.ToInt(
SECONDS) * interval.ToInt(SECONDS)
return self.FromSecondsSinceEpoch(seconds)
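  # Floor snaps a timestamp down to a multiple of the given interval (sketch;
  # Duration and MINUTES are defined later in this module):
  #   >>> ts = RDFDatetime.FromSecondsSinceEpoch(125)
  #   >>> ts.Floor(Duration.From(1, MINUTES)).AsSecondsSinceEpoch()
  #   120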
class RDFDatetimeSeconds(RDFDatetime):
"""A DateTime class which is stored in whole seconds."""
converter = 1
# Constants used as time unit in Duration methods.
MICROSECONDS = 1
MILLISECONDS = 1000
SECONDS = 1000 * MILLISECONDS
MINUTES = 60 * SECONDS
HOURS = 60 * MINUTES
DAYS = 24 * HOURS
WEEKS = 7 * DAYS
_DURATION_RE = re.compile(r"(?P<number>\d+) ?(?P<unit>[a-z]{1,2})")
@python_2_unicode_compatible
@functools.total_ordering
class Duration(RDFPrimitive):
"""Absolute duration between instants in time with microsecond precision.
The duration is stored as non-negative integer, guaranteeing microsecond
precision up to MAX_UINT64 microseconds (584k years).
"""
data_store_type = "unsigned_integer"
_DIVIDERS = collections.OrderedDict(
(("w", WEEKS), ("d", DAYS), ("h", HOURS), ("m", MINUTES), ("s", SECONDS),
("ms", MILLISECONDS), ("us", MICROSECONDS)))
def __init__(self, initializer=None):
"""Instantiates a new microsecond-based Duration.
Args:
initializer: Integer specifying microseconds, or another Duration to copy.
If None, Duration will be set to 0. Given a negative integer, its
absolute (positive) value will be stored.
"""
super(Duration, self).__init__(initializer=initializer)
if isinstance(initializer, (DurationSeconds, Duration)):
self._value = abs(initializer.microseconds)
elif isinstance(initializer, (int, RDFInteger)):
self._value = abs(int(initializer))
elif initializer is None:
self._value = 0
else:
message = "Unsupported initializer `{value}` of type `{type}`"
raise TypeError(message.format(value=initializer, type=type(initializer)))
@classmethod
def From(cls, value, timeunit):
"""Returns a new Duration given a timeunit and value.
Args:
value: A number specifying the value of the duration.
timeunit: A unit of time ranging from rdfvalue.MICROSECONDS to
rdfvalue.WEEKS.
    Examples:
      >>> Duration.From(50, MICROSECONDS)
      <Duration 50 us>
      >>> Duration.From(120, SECONDS)
      <Duration 2 m>
    Returns:
      A new Duration, truncated to microsecond precision.
"""
return cls(int(timeunit * value))
def ParseFromDatastore(self, value):
"""See base class."""
precondition.AssertType(value, int)
self._value = abs(compatibility.builtins.int(value))
def ParseFromBytes(self, string):
"""See base class."""
precondition.AssertType(string, bytes)
if not string:
self._value = 0
return
try:
self._value = abs(compatibility.builtins.int(string))
except ValueError as e:
raise DecodeError(e)
def SerializeToBytes(self):
"""See base class."""
# Technically, equal to ascii encoding, since str(self._value) only contains
# the digits 0-9.
return str(self._value).encode("utf-8")
def __repr__(self):
return "<Duration {}>".format(self)
def __str__(self):
if self._value == 0:
return "0 us"
for label, divider in iteritems(self._DIVIDERS):
if self._value % divider == 0:
return "%d %s" % (self._value // divider, label)
return "%d us" % self._value # Make pytype happy.
def __add__(self, other):
if isinstance(other, (Duration, DurationSeconds)):
return self.__class__(self.microseconds + other.microseconds)
else:
return NotImplemented
def __sub__(self, other):
if isinstance(other, (Duration, DurationSeconds)):
return self.__class__(self.microseconds - other.microseconds)
else:
return NotImplemented
def __mul__(self, other):
if isinstance(other, int):
return self.__class__(self.microseconds * other)
else:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __lt__(self, other):
if isinstance(other, (Duration, DurationSeconds)):
return self.microseconds < other.microseconds
else:
return NotImplemented
def __eq__(self, other):
if isinstance(other, (Duration, DurationSeconds)):
return self.microseconds == other.microseconds
else:
return NotImplemented
def ToInt(self, timeunit):
"""Returns the duration as truncated integer, converted to the time unit.
    All fractions are truncated. To preserve them, use `ToFractional()`.
Examples:
>>> Duration.From(2, WEEKS).ToInt(DAYS)
14
>>> Duration.From(100, SECONDS).ToInt(SECONDS)
100
>>> Duration.From(6, DAYS).ToInt(WEEKS)
0
Args:
timeunit: A unit of time ranging from rdfvalue.MICROSECONDS to
rdfvalue.WEEKS.
Returns:
An integer, representing the duration in the specific unit, truncating
fractions.
"""
return self.microseconds // timeunit
def ToFractional(self, timeunit):
"""Returns the duration as float, converted to the given time unit.
Examples:
>>> Duration.From(30, SECONDS).ToFractional(MINUTES)
0.5
>>> Duration.From(100, SECONDS).ToFractional(SECONDS)
100.0
>>> Duration.From(6, MINUTES).ToFractional(HOURS)
0.1
Args:
timeunit: A unit of time ranging from rdfvalue.MICROSECONDS to
rdfvalue.WEEKS.
Returns:
A float, representing the duration in the specific unit, including
fractions.
"""
return self.microseconds / timeunit
@property
def microseconds(self):
return self._value
def ParseFromHumanReadable(self, string):
"""See base class."""
precondition.AssertType(string, Text)
if not string:
self._value = 0
return
matches = _DURATION_RE.match(string)
if matches is None:
raise ValueError("Could not parse duration {!r}.".format(string))
number = int(matches.group("number"))
unit_string = matches.group("unit")
try:
unit_multiplier = self._DIVIDERS[unit_string]
except KeyError:
raise ValueError(
"Invalid unit {!r} for duration in {!r}. Expected any of {}.".format(
unit_string, string, ", ".join(self._DIVIDERS)))
self._value = number * unit_multiplier
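  # Accepted duration strings, per _DURATION_RE and _DIVIDERS above (sketch):
  #   >>> Duration.FromHumanReadable(u"2w")
  #   <Duration 2 w>
  #   >>> Duration.FromHumanReadable(u"90 s")
  #   <Duration 90 s>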
# TODO: Implement microsecond precision.
@python_2_unicode_compatible
class DurationSeconds(RDFInteger):
"""Duration value stored in seconds internally."""
data_store_type = "unsigned_integer"
# pyformat: disable
DIVIDERS = collections.OrderedDict((
("w", 60 * 60 * 24 * 7),
("d", 60 * 60 * 24),
("h", 60 * 60),
("m", 60),
("s", 1)))
# pyformat: enable
  def __init__(self, initializer=None):
super(DurationSeconds, self).__init__(None)
if isinstance(initializer, DurationSeconds):
self._value = initializer.seconds
elif isinstance(initializer, Duration):
self._value = initializer.ToInt(SECONDS) # type: ignore
elif isinstance(initializer, Text):
self.ParseFromHumanReadable(initializer)
elif isinstance(initializer, bytes):
self.ParseFromBytes(initializer)
elif isinstance(initializer, int):
self._value = initializer
elif isinstance(initializer, RDFInteger):
self._value = int(initializer)
elif initializer is None:
self._value = 0
else:
message = "Unsupported initializer `{value}` of type `{type}`"
raise TypeError(message.format(value=initializer, type=type(initializer)))
@classmethod
def FromDays(cls, days):
return cls.FromHours(24 * days)
@classmethod
def FromHours(cls, hours):
return cls.FromMinutes(60 * hours)
@classmethod
def FromMinutes(cls, minutes):
return cls.FromSeconds(60 * minutes)
@classmethod
def FromSeconds(cls, seconds):
return cls(seconds)
@classmethod
def FromMicroseconds(cls, microseconds):
return cls.FromSeconds(int(microseconds / 1e6))
def Validate(self, value, **_):
self.ParseFromBytes(value)
def ParseFromBytes(self, string):
precondition.AssertType(string, bytes)
self.ParseFromHumanReadable(string.decode("utf-8"))
def SerializeToBytes(self):
return str(self).encode("utf-8")
@property
def seconds(self):
return self._value
@property
def milliseconds(self):
return self._value * 1000
@property
def microseconds(self):
return self._value * 1000000
def __str__(self):
time_secs = self._value
for label, divider in iteritems(self.DIVIDERS):
if time_secs % divider == 0:
return "%d%s" % (time_secs // divider, label)
return "{} μs".format(self.microseconds)
def __add__(self, other):
if isinstance(other, (int, float, DurationSeconds)):
# Assume other is in seconds
return self.__class__(self._value + other)
return NotImplemented
def __mul__(self, other):
if isinstance(other, (int, float, DurationSeconds)):
return self.__class__(int(self._value * other))
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __sub__(self, other):
if isinstance(other, (int, float, DurationSeconds)):
# Assume other is in seconds
return self.__class__(self._value - other)
return NotImplemented
def __abs__(self):
return DurationSeconds(abs(self._value))
def Expiry(self, base_time=None):
if base_time is None:
base_time = RDFDatetime.Now()
else:
base_time = base_time.Copy()
base_time_sec = base_time.AsSecondsSinceEpoch()
return RDFDatetime.FromSecondsSinceEpoch(base_time_sec + self._value)
def ParseFromHumanReadable(self, timestring):
"""Parse a human readable string of a duration.
Args:
timestring: The string to parse.
"""
precondition.AssertType(timestring, Text)
if not timestring:
return
orig_string = timestring
multiplicator = 1
if timestring[-1].isdigit():
pass
else:
try:
multiplicator = self.DIVIDERS[timestring[-1]]
except KeyError:
raise RuntimeError("Invalid duration multiplicator: '%s' ('%s')." %
(timestring[-1], orig_string))
timestring = timestring[:-1]
try:
self._value = int(timestring) * multiplicator
except ValueError:
raise InitializeError("Could not parse expiration time '%s'." %
orig_string)
def __eq__(self, other):
if isinstance(other, (Duration, DurationSeconds)):
return self.microseconds == other.microseconds
else:
return NotImplemented
class ByteSize(RDFInteger):
"""A size for bytes allowing standard unit prefixes.
We use the standard IEC 60027-2 A.2 and ISO/IEC 80000:
Binary units (powers of 2): Ki, Mi, Gi
SI units (powers of 10): k, m, g
"""
data_store_type = "unsigned_integer"
DIVIDERS = dict((
("", 1),
("k", 1000),
("m", 1000**2),
("g", 1000**3),
("ki", 1024),
("mi", 1024**2),
("gi", 1024**3),
))
REGEX = re.compile("^([0-9.]+)([kmgi]*)b?$", re.I)
def __init__(self, initializer=None):
super(ByteSize, self).__init__(None)
if isinstance(initializer, ByteSize):
self._value = initializer._value # pylint: disable=protected-access
elif isinstance(initializer, string_types):
self.ParseFromHumanReadable(initializer)
elif isinstance(initializer, (int, float)):
self._value = initializer
elif isinstance(initializer, RDFInteger):
self._value = int(initializer)
elif initializer is None:
self._value = 0
else:
raise InitializeError("Unknown initializer for ByteSize: %s." %
type(initializer))
def __str__(self):
if self._value >= 1024**3:
unit = "GiB"
value = self._value / 1024**3
elif self._value >= 1024**2:
unit = "MiB"
value = self._value / 1024**2
elif self._value >= 1024:
unit = "KiB"
value = self._value / 1024
else:
return "{} B".format(self._value)
return "{value:.1f} {unit}".format(value=value, unit=unit)
def ParseFromHumanReadable(self, string):
"""Parse a human readable string of a byte string.
Args:
string: The string to parse.
Raises:
DecodeError: If the string can not be parsed.
"""
if not string:
return None
match = self.REGEX.match(string.strip().lower())
if not match:
raise DecodeError("Unknown specification for ByteSize %s" % string)
multiplier = self.DIVIDERS.get(match.group(2))
if not multiplier:
raise DecodeError("Invalid multiplier %s" % match.group(2))
    # The value may be represented as a float, but if not, don't lose accuracy.
value = match.group(1)
if "." in value:
value = float(value)
else:
value = int(value)
self._value = int(value * multiplier)
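  # Unit handling in practice (sketch): plain SI prefixes are powers of 10,
  # the "i" variants are powers of 2, and __str__ always prints binary units.
  #   >>> int(ByteSize(u"5KiB")), int(ByteSize(u"5kb"))
  #   (5120, 5000)
  #   >>> str(ByteSize(1536))
  #   '1.5 KiB'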
@functools.total_ordering
@python_2_unicode_compatible
class RDFURN(RDFPrimitive):
"""An object to abstract URL manipulation."""
data_store_type = "string"
# Careful when changing this value, this is hardcoded a few times in this
# class for performance reasons.
scheme = "aff4"
_string_urn = ""
def __init__(self, initializer=None):
"""Constructor.
Args:
initializer: A string or another RDFURN.
"""
# This is a shortcut that is a bit faster than the standard way of
# using the RDFValue constructor to make a copy of the class. For
# RDFURNs that way is a bit slow since it would try to normalize
# the path again which is not needed - it comes from another
# RDFURN so it is already in the correct format.
if isinstance(initializer, RDFURN):
# Make a direct copy of the other object
self._string_urn = initializer.Path()
super(RDFURN, self).__init__(None)
return
super(RDFURN, self).__init__(initializer=initializer)
if self._value is None and initializer is not None:
if isinstance(initializer, bytes):
self.ParseFromBytes(initializer)
elif isinstance(initializer, Text):
self.ParseFromUnicode(initializer)
else:
message = "Unsupported initializer `%s` of type `%s"
message %= (initializer, type(initializer))
raise TypeError(message)
def ParseFromBytes(self, initializer):
"""Create RDFRUN from string.
Args:
initializer: url string
"""
precondition.AssertType(initializer, bytes)
self.ParseFromUnicode(initializer.decode("utf-8"))
def ParseFromUnicode(self, initializer):
precondition.AssertType(initializer, Text)
# Strip off the aff4: prefix if necessary.
if initializer.startswith("aff4:/"):
initializer = initializer[5:]
self._string_urn = utils.NormalizePath(initializer)
def ParseFromDatastore(self, value):
precondition.AssertType(value, Text)
# TODO(hanuszczak): We should just assign the `self._string_urn` here
# instead of including all of the parsing magic since the data store values
# should be normalized already. But sadly this is not the case and for now
# we have to deal with unnormalized values as well.
self.ParseFromUnicode(value)
def ParseFromHumanReadable(self, string):
self.ParseFromUnicode(string)
def SerializeToBytes(self):
return str(self).encode("utf-8")
def SerializeToDataStore(self):
return str(self)
def Dirname(self):
return posixpath.dirname(self._string_urn)
def Basename(self):
return posixpath.basename(self.Path())
def Add(self, path):
"""Add a relative stem to the current value and return a new RDFURN.
If urn is a fully qualified URN, replace the current value with it.
Args:
path: A string containing a relative path.
Returns:
A new RDFURN that can be chained.
Raises:
ValueError: if the path component is not a string.
"""
if not isinstance(path, string_types):
raise ValueError("Only strings should be added to a URN, not %s" %
path.__class__)
result = self.Copy()
result.Update(path=utils.JoinPath(self._string_urn, path))
return result
def Update(self, url=None, path=None):
"""Update one of the fields.
Args:
url: An optional string containing a URL.
      path: An optional new path to set for this URN.
"""
if url:
self.ParseFromBytes(url)
if path:
self._string_urn = path
self.dirty = True
def Copy(self):
"""Make a copy of ourselves."""
return self.__class__(self)
def __str__(self):
return "aff4:%s" % self._string_urn
# Required, because in Python 3 overriding `__eq__` nullifies `__hash__`.
__hash__ = RDFPrimitive.__hash__
def __eq__(self, other):
if isinstance(other, string_types):
other = self.__class__(other)
elif other is None:
return False
elif not isinstance(other, RDFURN):
return NotImplemented
return self._string_urn == other.Path()
def __bool__(self):
return bool(self._string_urn)
# TODO: Remove after support for Python 2 is dropped.
__nonzero__ = __bool__
def __lt__(self, other):
return self._string_urn < other
def Path(self):
"""Return the path of the urn."""
return self._string_urn
def Split(self, count=None):
"""Returns all the path components.
Args:
count: If count is specified, the output will be exactly this many path
components, possibly extended with the empty string. This is useful for
tuple assignments without worrying about ValueErrors: namespace, path =
urn.Split(2)
Returns:
A list of path components of this URN.
"""
if count:
result = list(filter(None, self._string_urn.split("/", count)))
while len(result) < count:
result.append("")
return result
else:
return list(filter(None, self._string_urn.split("/")))
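  # Split with a count pads with empty strings, making tuple unpacking safe
  # (sketch):
  #   >>> RDFURN("aff4:/foo/bar/baz").Split(2)
  #   ['foo', 'bar/baz']
  #   >>> RDFURN("aff4:/foo").Split(2)
  #   ['foo', '']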
def RelativeName(self, volume):
"""Given a volume URN return the relative URN as a unicode string.
We remove the volume prefix from our own.
Args:
volume: An RDFURN or fully qualified url string.
Returns:
A string of the url relative from the volume or None if our URN does not
start with the volume prefix.
"""
string_url = utils.SmartUnicode(self)
volume_url = utils.SmartUnicode(volume)
if string_url.startswith(volume_url):
result = string_url[len(volume_url):]
# This must always return a relative path so we strip leading "/"s. The
# result is always a unicode string.
return result.lstrip("/")
return None
def __repr__(self):
return "<%s>" % self
class Subject(RDFURN):
"""A psuedo attribute representing the subject of an AFF4 object."""
DEFAULT_FLOW_QUEUE = RDFURN("F")
class SessionID(RDFURN):
"""An rdfvalue object that represents a session_id."""
def __init__(self,
initializer=None,
base="aff4:/flows",
queue=DEFAULT_FLOW_QUEUE,
flow_name=None):
"""Constructor.
Args:
initializer: A string or another RDFURN.
base: The base namespace this session id lives in.
queue: The queue to use.
flow_name: The name of this flow or its random id.
Raises:
InitializeError: The given URN cannot be converted to a SessionID.
"""
if initializer is None:
# This SessionID is being constructed from scratch.
if flow_name is None:
flow_name = random.UInt32()
if isinstance(flow_name, int):
initializer = RDFURN(base).Add("%s:%X" % (queue.Basename(), flow_name))
else:
initializer = RDFURN(base).Add("%s:%s" % (queue.Basename(), flow_name))
else:
if isinstance(initializer, RDFURN):
try:
self.ValidateID(initializer.Basename())
except ValueError as e:
raise InitializeError("Invalid URN for SessionID: %s, %s" %
(initializer, e))
super(SessionID, self).__init__(initializer=initializer)
def Add(self, path):
# Adding to a SessionID results in a normal RDFURN.
return RDFURN(self).Add(path)
@classmethod
def ValidateID(cls, id_str):
# This check is weaker than it could be because we allow queues called
# "DEBUG-user1" and IDs like "TransferStore". We also have to allow
# flows session ids like H:123456:hunt.
allowed_re = re.compile(r"^[-0-9a-zA-Z]+(:[0-9a-zA-Z]+){0,2}$")
if not allowed_re.match(id_str):
raise ValueError("Invalid SessionID: %s" % id_str)
# TODO(hanuszczak): Remove this class.
class FlowSessionID(SessionID):
pass
|
{
"content_hash": "7d16390d0b127d4fc0f2b5c26a6167a0",
"timestamp": "",
"source": "github",
"line_count": 1464,
"max_line_length": 108,
"avg_line_length": 29.095628415300546,
"alnum_prop": 0.6647572542022725,
"repo_name": "demonchild2112/travis-test",
"id": "1d912477f2b8324845d8e9b38d19f016c56f89a3",
"size": "42645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/core/grr_response_core/lib/rdfvalue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3446"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "35549"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HCL",
"bytes": "7208"
},
{
"name": "HTML",
"bytes": "190212"
},
{
"name": "JavaScript",
"bytes": "11691"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7213255"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "48882"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "51"
}
],
"symlink_target": ""
}
|
from socket import *
from time import ctime
import select
import sys
HOST=''
PORT=21569
BUFSIZ=1024
ADDR=(HOST,PORT)
tcpSerSock=socket(AF_INET,SOCK_STREAM)
tcpSerSock.bind(ADDR)
tcpSerSock.listen(5)
inputs = [tcpSerSock, sys.stdin]  # renamed from `input` to avoid shadowing the builtin
while True:
    print 'waiting for connection...'
    tcpCliSock, addr = tcpSerSock.accept()
    print '...connected from:', addr
    inputs.append(tcpCliSock)  # The difference is here: once a client connects, add its socket to the select() inputs
    connected = True
    while connected:
        # Each pass blocks here; the code below only runs once a socket (or stdin) is readable
        readyInput, readyOutput, readyException = select.select(inputs, [], [])
        for indata in readyInput:
            if indata == tcpCliSock:
                data = tcpCliSock.recv(BUFSIZ)
                if not data:
                    connected = False  # client closed the connection; leave the inner loop
                    break
                print data
            else:
                data = raw_input()
                if not data:
                    connected = False
                    break
                tcpCliSock.send('[%s] %s' % (ctime(), data))
    inputs.remove(tcpCliSock)
    tcpCliSock.close()
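# A minimal matching client sketch (hypothetical, not part of the original
# script; assumes the server above is reachable on localhost:21569):
#   from socket import *
#   cli = socket(AF_INET, SOCK_STREAM)
#   cli.connect(('localhost', 21569))
#   cli.send('hello')
#   print cli.recv(1024)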
|
{
"content_hash": "6f69ed7dc29b1a64ccc78e20eb5b78ab",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 98,
"avg_line_length": 21.97142857142857,
"alnum_prop": 0.7269180754226268,
"repo_name": "eggfly/gosms",
"id": "e8960284a818895b0a245b15add2b5a12dd52669",
"size": "920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chat/pysms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "1602"
},
{
"name": "Java",
"bytes": "40726"
},
{
"name": "Python",
"bytes": "15229"
},
{
"name": "Shell",
"bytes": "40"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'functional_test.views.home', name='home'),
url(r'^login/(?P<provider_name>\w+)',
'functional_test.views.login', name='login'),
)
|
{
"content_hash": "661b861c02913a25cbd8d78caa9ac2d3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 77,
"avg_line_length": 39.75,
"alnum_prop": 0.4779874213836478,
"repo_name": "peterhudec/authomatic",
"id": "5555a24e0dec37ad02df29b3db70d42f458ee016",
"size": "342",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/django/functional_test/functional_test/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "13463"
},
{
"name": "HTML",
"bytes": "5436"
},
{
"name": "Python",
"bytes": "363520"
},
{
"name": "Shell",
"bytes": "1336"
}
],
"symlink_target": ""
}
|
__version__ = '0.2.12'
def validate_gateway(gateway):
"""Test that a gateway is correctly set up.
Returns True if successful, or an error message."""
from hiicart.gateway.base import GatewayError
from hiicart.gateway.amazon.gateway import AmazonGateway
from hiicart.gateway.google.gateway import GoogleGateway
from hiicart.gateway.paypal.gateway import PaypalGateway
from hiicart.gateway.paypal2.gateway import Paypal2Gateway
from hiicart.gateway.paypal_adaptive.gateway import PaypalAPGateway
if gateway == "amazon":
cls = AmazonGateway
elif gateway == "google":
cls = GoogleGateway
elif gateway == "paypal":
cls = PaypalGateway
elif gateway == "paypal2":
cls = Paypal2Gateway
elif gateway == "paypal_adaptive":
cls = PaypalAPGateway
try:
obj = cls()
return obj._is_valid() or "Authentication Error"
except GatewayError, err:
return err.message
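# Expected usage (a sketch): the function returns True when the gateway's
# credentials validate, or a human-readable error string otherwise.
#   >>> validate_gateway("paypal")
#   True                      # or e.g. "Authentication Error"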
|
{
"content_hash": "534621aab2d413f05a778304e07b4e1d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 71,
"avg_line_length": 36,
"alnum_prop": 0.6862139917695473,
"repo_name": "kbourgoin/hiicart",
"id": "d5081163096ca5ecebdbfa073f20256a080e6b95",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hiicart/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "444850"
}
],
"symlink_target": ""
}
|
import TownLoader
import TTStreet
from toontown.suit import Suit
class TTTownLoader(TownLoader.TownLoader):
def __init__(self, hood, parentFSM, doneEvent):
TownLoader.TownLoader.__init__(self, hood, parentFSM, doneEvent)
self.streetClass = TTStreet.TTStreet
self.musicFile = 'phase_3.5/audio/bgm/TC_SZ.ogg'
self.activityMusicFile = 'phase_3.5/audio/bgm/TC_SZ_activity.ogg'
self.townStorageDNAFile = 'phase_5/dna/storage_TT_town.dna'
def load(self, zoneId):
TownLoader.TownLoader.load(self, zoneId)
Suit.loadSuits(1)
dnaFile = 'phase_5/dna/toontown_central_' + str(self.canonicalBranchZone) + '.dna'
self.createHood(dnaFile)
def unload(self):
Suit.unloadSuits(1)
TownLoader.TownLoader.unload(self)
|
{
"content_hash": "e9802de1e6dd2c8631273d51158e7772",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 90,
"avg_line_length": 36.40909090909091,
"alnum_prop": 0.6803995006242197,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "7f402e20521edd4b3108929cf3f84579ad18d2d3",
"size": "801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/town/TTTownLoader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
}
|
# Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle API Gateway events"""
import logging
import boto3
import botocore
from tryagain import retries
from ...utils import (add_lambda_permissions, get_details, get_env_credential, get_lambda_alias_arn,
get_lambda_arn, get_properties)
LOG = logging.getLogger(__name__)
class APIGateway:
"""Class to handle API Gateway and Lambda integration.
Args:
app (str): Application Name
env (str): Environment/account for deployments
region (str): AWS Region
rules (dict): Trigger settings
prop_path (str): Path to the raw.properties.json
"""
    def __init__(self, app='', env='', region='', rules=None, prop_path=''):
        self.log = logging.getLogger(__name__)
        self.generated = get_details(app=app, env=env)
        # Avoid a shared mutable default for the trigger settings.
        self.trigger_settings = rules or {}
self.app_name = self.generated.app_name()
self.env = env
self.account_id = get_env_credential(env=self.env)['accountId']
self.region = region
self.properties = get_properties(properties_file=prop_path, env=self.env, region=self.region)
session = boto3.Session(profile_name=env, region_name=region)
self.client = session.client('apigateway')
self.lambda_client = session.client('lambda')
self.api_version = self.lambda_client.meta.service_model.api_version
self.api_id = self.find_api_id()
self.resource_id, self.parent_id = self.find_resource_ids()
def find_api_id(self):
"""Given API name, find API ID."""
allapis = self.client.get_rest_apis()
api_name = self.trigger_settings['api_name']
api_id = None
for api in allapis['items']:
if api['name'] == api_name:
api_id = api['id']
self.log.info("Found API for: %s", api_name)
break
else:
api_id = self.create_api()
return api_id
def find_resource_ids(self):
"""Given a resource path and API Id, find resource Id."""
all_resources = self.client.get_resources(restApiId=self.api_id)
parent_id = None
resource_id = None
for resource in all_resources['items']:
if resource['path'] == "/":
parent_id = resource['id']
if resource['path'] == self.trigger_settings['resource']:
resource_id = resource['id']
self.log.info("Found Resource ID for: %s", resource['path'])
return resource_id, parent_id
def add_lambda_integration(self):
"""Attach lambda found to API."""
lambda_uri = self.generate_uris()['lambda_uri']
api_type = None
if 'api_type' in self.trigger_settings:
api_type = self.trigger_settings['api_type']
self.log.info("Found API Integration Type: %s", api_type)
else:
api_type = 'AWS'
self.client.put_integration(
restApiId=self.api_id,
resourceId=self.resource_id,
httpMethod=self.trigger_settings['method'],
integrationHttpMethod='POST',
uri=lambda_uri,
type=api_type)
self.add_integration_response()
self.log.info("Successfully added Lambda intergration to API")
def add_integration_response(self):
"""Add an intergation response to the API for the lambda integration."""
self.client.put_integration_response(
restApiId=self.api_id,
resourceId=self.resource_id,
httpMethod=self.trigger_settings['method'],
statusCode='200',
responseTemplates={'application/json': ''})
def add_permission(self):
"""Add permission to Lambda for the API Trigger."""
statement_id = '{}_api_{}'.format(self.app_name, self.trigger_settings['api_name'])
principal = 'apigateway.amazonaws.com'
lambda_alias_arn = get_lambda_alias_arn(self.app_name, self.env, self.region)
lambda_unqualified_arn = get_lambda_arn(self.app_name, self.env, self.region)
resource_name = self.trigger_settings.get('resource', '')
resource_name = resource_name.replace('/', '')
method_api_source_arn = 'arn:aws:execute-api:{}:{}:{}/{}/{}/{}'.format(
self.region, self.account_id, self.api_id, self.env, self.trigger_settings['method'], resource_name)
global_api_source_arn = 'arn:aws:execute-api:{}:{}:{}/*/*/{}'.format(self.region, self.account_id, self.api_id,
resource_name)
add_lambda_permissions(
function=lambda_alias_arn,
statement_id=statement_id + self.trigger_settings['method'],
action='lambda:InvokeFunction',
principal=principal,
env=self.env,
region=self.region,
source_arn=method_api_source_arn)
add_lambda_permissions(
function=lambda_alias_arn,
statement_id=statement_id,
action='lambda:InvokeFunction',
principal=principal,
env=self.env,
region=self.region,
source_arn=global_api_source_arn)
add_lambda_permissions(
function=lambda_unqualified_arn,
statement_id=statement_id + self.trigger_settings['method'],
action='lambda:InvokeFunction',
principal=principal,
env=self.env,
region=self.region,
source_arn=method_api_source_arn)
add_lambda_permissions(
function=lambda_unqualified_arn,
statement_id=statement_id,
action='lambda:InvokeFunction',
principal=principal,
env=self.env,
region=self.region,
source_arn=global_api_source_arn)
    @retries(max_attempts=5, wait=2, exceptions=(botocore.exceptions.ClientError,))
def create_api_deployment(self):
"""Create API deployment of ENV name."""
try:
self.client.create_deployment(restApiId=self.api_id, stageName=self.env)
self.log.info('Created a deployment resource.')
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
if error_code == 'TooManyRequestsException':
self.log.debug('Retrying. We have hit api limit.')
else:
self.log.debug('Retrying. We received %s.', error_code)
def create_api_key(self):
"""Create API Key for API access."""
apikeys = self.client.get_api_keys()
for key in apikeys['items']:
if key['name'] == self.app_name:
self.log.info("Key %s already exists", self.app_name)
break
else:
self.client.create_api_key(
name=self.app_name, enabled=True, stageKeys=[{
'restApiId': self.api_id,
'stageName': self.env
}])
self.log.info("Successfully created API Key %s. Look in the AWS console for the key", self.app_name)
def _format_base_path(self, api_name):
"""Format the base path name."""
name = self.app_name
if self.app_name != api_name:
name = '{0}-{1}'.format(self.app_name, api_name)
return name
def update_api_mappings(self):
"""Create a cname for the API deployment."""
response_provider = None
response_action = None
domain = self.generated.apigateway()['domain']
try:
response_provider = self.client.create_base_path_mapping(
domainName=domain,
basePath=self._format_base_path(self.trigger_settings['api_name']),
restApiId=self.api_id,
stage=self.env, )
response_action = 'API mapping added.'
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
if error_code == 'ConflictException':
response_action = 'API mapping already exist.'
else:
response_action = 'Unknown error: {0}'.format(error_code)
self.log.debug('Provider response: %s', response_provider)
self.log.info(response_action)
return response_provider
def generate_uris(self):
"""Generate several lambda uris."""
lambda_arn = "arn:aws:execute-api:{0}:{1}:{2}/*/{3}/{4}".format(self.region, self.account_id, self.api_id,
self.trigger_settings['method'],
self.trigger_settings['resource'])
lambda_uri = ("arn:aws:apigateway:{0}:lambda:path/{1}/functions/"
"arn:aws:lambda:{0}:{2}:function:{3}/invocations").format(self.region, self.api_version,
self.account_id, self.app_name)
api_dns = "https://{0}.execute-api.{1}.amazonaws.com/{2}".format(self.api_id, self.region, self.env)
uri_dict = {'lambda_arn': lambda_arn, 'lambda_uri': lambda_uri, 'api_dns': api_dns}
return uri_dict
def create_api(self):
"""Create the REST API."""
created_api = self.client.create_rest_api(name=self.trigger_settings.get('api_name', self.app_name))
api_id = created_api['id']
self.log.info("Successfully created API")
return api_id
def create_resource(self, parent_id=""):
"""Create the specified resource.
Args:
parent_id (str): The resource ID of the parent resource in API Gateway
"""
resource_name = self.trigger_settings.get('resource', '')
resource_name = resource_name.replace('/', '')
if not self.resource_id:
created_resource = self.client.create_resource(
restApiId=self.api_id, parentId=parent_id, pathPart=resource_name)
self.resource_id = created_resource['id']
self.log.info("Successfully created resource")
else:
self.log.info("Resource already exists. To update resource please delete existing resource: %s",
resource_name)
def attach_method(self, resource_id):
"""Attach the defined method."""
try:
_response = self.client.put_method(
restApiId=self.api_id,
resourceId=resource_id,
httpMethod=self.trigger_settings['method'],
authorizationType="NONE",
apiKeyRequired=False, )
self.log.debug('Response for resource (%s) push authorization: %s', resource_id, _response)
_response = self.client.put_method_response(
restApiId=self.api_id,
resourceId=resource_id,
httpMethod=self.trigger_settings['method'],
statusCode='200')
self.log.debug('Response for resource (%s) no authorization: %s', resource_id, _response)
self.log.info("Successfully attached method: %s", self.trigger_settings['method'])
except botocore.exceptions.ClientError:
self.log.info("Method %s already exists", self.trigger_settings['method'])
def setup_lambda_api(self):
"""A wrapper for all the steps needed to setup the integration."""
self.create_resource(self.parent_id)
self.attach_method(self.resource_id)
self.add_lambda_integration()
self.add_permission()
self.create_api_deployment()
self.create_api_key()
self.update_api_mappings()
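    # A hedged usage sketch (argument values are illustrative, not Foremast
    # defaults): wire a GET /hello resource of an existing API to the app's
    # Lambda and deploy it to the "dev" stage.
    #   rules = {'api_name': 'myapp-api', 'resource': '/hello', 'method': 'GET'}
    #   gateway = APIGateway(app='myapp', env='dev', region='us-east-1',
    #                        rules=rules, prop_path='raw.properties.json')
    #   gateway.setup_lambda_api()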
|
{
"content_hash": "6be8e307baeba2b6060f4ae1de200b45",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 119,
"avg_line_length": 43.19230769230769,
"alnum_prop": 0.5841495992876224,
"repo_name": "gogoair/foremast",
"id": "60cb5ac99aa4dacd7b76a34ca68801962910bd1d",
"size": "12353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/foremast/awslambda/api_gateway_event/api_gateway_event.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7614"
},
{
"name": "Python",
"bytes": "484364"
},
{
"name": "Shell",
"bytes": "180"
}
],
"symlink_target": ""
}
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016-2018 Dietrich Pescoller @Microgate SRL
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Oscilloscopes
from .hmo1002 import hmo1002
from .rtc1002 import rtc1002
# Digital Multimeters
from .hmc8012 import hmc8012
|
{
"content_hash": "904888562b3ea74ca563bf4bf408ebd4",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 40.903225806451616,
"alnum_prop": 0.805205047318612,
"repo_name": "Diti24/python-ivi",
"id": "d9316e82bfbffab8d9c9ffe288c785ab6b35e45d",
"size": "1268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ivi/rohdeschwarz/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1992462"
}
],
"symlink_target": ""
}
|
def bucket_sort(arr):
''' Bucket Sort
Complexity: O(n^2)
        The complexity is dominated by next_sort
'''
# The number of buckets and make buckets
num_buckets = len(arr)
buckets = [[] for bucket in range(num_buckets)]
    # Assign values into buckets; hoist max(arr) out of the loop so the
    # distribution step stays O(n)
    max_value = max(arr)
    for value in arr:
        index = value * num_buckets // (max_value + 1)
        buckets[index].append(value)
# Sort
sorted_list = []
for i in range(num_buckets):
sorted_list.extend(next_sort(buckets[i]))
return sorted_list
def next_sort(arr):
# We will use insertion sort here.
for i in range(1, len(arr)):
j = i - 1
key = arr[i]
        # Check j >= 0 before indexing so we never wrap around to arr[-1]
        while j >= 0 and arr[j] > key:
arr[j+1] = arr[j]
j = j - 1
arr[j + 1] = key
return arr
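if __name__ == '__main__':
    # Quick sanity check (not part of the original module): the output should
    # match Python's built-in sorted().
    sample = [29, 25, 3, 49, 9, 37, 21, 43]
    assert bucket_sort(sample) == sorted(sample)
    print(bucket_sort(sample))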
|
{
"content_hash": "5afc0f65fe2aeb3b986e537e723fab1a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 53,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.55,
"repo_name": "amaozhao/algorithms",
"id": "d89232ccfc37674df7b54375363e5dddc92ba154",
"size": "800",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "algorithms/sort/bucket_sort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "469268"
}
],
"symlink_target": ""
}
|
from selenium import webdriver
from getpass import getpass
usr = input('Enter your username or email : ')
pwd = getpass('Enter your password : ')
driver = webdriver.Chrome()
driver.get('https://twitter.com/login')
usr_box = driver.find_element_by_class_name('js-username-field')
usr_box.send_keys(usr)
pwd_box = driver.find_element_by_class_name('js-password-field')
pwd_box.send_keys(pwd)
login_button = driver.find_element_by_css_selector('button.submit.EdgeButton.EdgeButton--primary.EdgeButtom--medium')
login_button.submit()
|
{
"content_hash": "20ef5674f2ca09256560087472e98095",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 117,
"avg_line_length": 31.470588235294116,
"alnum_prop": 0.7570093457943925,
"repo_name": "umangahuja1/Python",
"id": "7cac446febe49c9ef430ab223b27e8fd1f7f7c13",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Automation/twitter_login.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13427"
}
],
"symlink_target": ""
}
|
import numpy as np
from nlpaug.model.audio import Audio
class Crop(Audio):
def manipulate(self, data, start_pos, end_pos):
aug_data = data.copy()
aug_data = np.delete(aug_data, np.s_[start_pos:end_pos])
return aug_data
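# Example of manipulate (a sketch; assumes the Audio base class can be
# instantiated without extra arguments):
#   >>> import numpy as np
#   >>> Crop().manipulate(np.arange(10), start_pos=3, end_pos=6)
#   array([0, 1, 2, 6, 7, 8, 9])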
|
{
"content_hash": "f756fe6dc22b0d11af824042cb4d77e7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 25,
"alnum_prop": 0.648,
"repo_name": "makcedward/nlpaug",
"id": "277e337ec924a583578283d1f637b5cb6024bfb4",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nlpaug/model/audio/crop.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "777279"
},
{
"name": "Python",
"bytes": "512156"
},
{
"name": "Shell",
"bytes": "2004"
}
],
"symlink_target": ""
}
|
"""All the views for DrawWrite."""
# Imports {{{
import logging
from base64 import b64decode
from itertools import zip_longest
from django.core.files.base import ContentFile
from django.db import IntegrityError
from django.http import HttpResponseBadRequest, HttpResponseNotAllowed, JsonResponse
from django.shortcuts import redirect, render
from drawwrite.forms import CreateGameForm, JoinGameForm
from drawwrite.models import Chain, Game, DrawLink, Player, WriteLink
from . import services
from .bracefmt import BraceFormatter as __
# }}}
LOG = logging.getLogger(__name__)
# index {{{
def index(request):
"""
The front page of the app.
"""
LOG.debug("enter index")
# Create the two forms that we'll put on this page.
create_form = CreateGameForm()
join_form = JoinGameForm()
# TODO errors shouldn't get added by title and description, but by number.
# Then I should look up the title and description from that number.
return render(request, 'drawwrite/index.html', {
'create_form': create_form,
'join_form': join_form,
'error_title': request.session.pop('error_title', None),
'error_description': request.session.pop('error_description', None),
})
# }}}
# join_game {{{
def join_game(request):
"""
    Process data that a user sends when they want to join a game.
"""
LOG.debug('enter')
# Send all non-POSTs to the index.
if request.method != 'POST':
LOG.info(__('attempted non-supported method {0}', request.method))
request.session['error_title'] = 'Unsupported method'
        request.session['error_description'] = (
            # No trailing comma here: it would turn the string into a 1-tuple.
            'You\'re not allowed to send {0} requests to that endpoint.'.format(request.method)
        )
return redirect('drawwrite:index')
# Get the form from the POSTed data.
form = JoinGameForm(request.POST)
# Invalid forms redirect to the index with an error.
if not form.is_valid():
LOG.debug(__(
'name {0} or gamename {1} invalid',
form.data['username'],
form.data['gamename'],
))
request.session['error_title'] = 'Invalid input'
request.session['error_description'] = ' '.join((
'Your Name and the Game Name must only contain letters, numbers,',
'underscores, and hyphens.',
))
return redirect('drawwrite:index')
# Valid forms are processed.
gamename = form.cleaned_data['gamename']
username = form.cleaned_data['username']
# Get the game. On error, add error objects to the session and redirect
# to index.
# TODO extract this, possibly to services.py
games = Game.objects.filter( #pylint: disable=no-member
name=gamename,
).filter(
started=False,
)
if len(games) > 1:
LOG.error(__('somehow, two games with name {0} are being created', gamename))
request.session['error_title'] = 'Non-unique game name'
request.session['error_description'] = 'Could not find a unique game for you to join'
return redirect('drawwrite:index')
if len(games) < 1:
        LOG.error(__('tried to join non-existent game {0}', gamename))
request.session['error_title'] = 'Non-existent game'
request.session['error_description'] = ' '.join((
'The game that you attempted to join, {0},'.format(gamename),
'does not exist. Please check that you entered it correctly.',
))
return redirect('drawwrite:index')
game = games[0]
LOG.debug(__('got game for player {0}', username))
# Add a player to the game. On error, add error objects to the session and
# redirect to index.
player = None
try:
player = services.new_player(game, username, False)
except services.GameAlreadyStarted:
LOG.debug(__('could not add {0} to game {1}', username, game.name))
request.session['error_title'] = 'Game started'
request.session['error_description'] = ' '.join((
'The game that you attempted to join has already started. Please',
'either join a different game or start your own game.',
))
return redirect('drawwrite:index')
# TODO don't assume that all IntegrityError's mean that the game name is
# already taken. There are plenty of other explanations that I'm
# silencing by doing this.
except IntegrityError:
LOG.exception(__(
'player with {0} already exists in game {1}',
username,
gamename,
))
request.session['error_title'] = 'Player exists'
request.session['error_description'] = ' '.join((
'The player name that you entered is already in use in the game',
'that you are trying to join. Please choose a new player name',
'and try again.',
))
return redirect('drawwrite:index')
# Redirect to that game's page.
LOG.debug('exiting join game view')
return redirect('drawwrite:play', player.pk)
# }}}
# create_game {{{
def create_game(request):
"""
Create a game according to the values the user specified in the form.
"""
LOG.debug('entering create game view')
# Send all non-POSTs to the index.
if request.method != 'POST':
LOG.debug(__('attempted non-supported method {0}', request.method))
return redirect('drawwrite:index')
# Get the form from the POSTed data.
form = CreateGameForm(request.POST)
# Invalid forms redirect to the index with an error.
if not form.is_valid():
#LOG.debug(__(
# 'username {0} or gamename {1} invalid',
# form.data['username'],
# form.data['gamename'],
#))
LOG.debug(__(
'form error: {0}',
form.errors,
))
request.session['error_title'] = 'Invalid input'
request.session['error_description'] = ' '.join((
'Your Name and the Game Name must only contain letters, numbers,',
'underscores, and hyphens.',
))
return redirect('drawwrite:index')
# Valid forms are processed.
gamename = form.cleaned_data['gamename']
username = form.cleaned_data['username']
# Create game. On error, add error objects to the session and redirect
# to index.
# TODO handle other errors that could happen?
game = services.new_game(gamename)
    if game is None:
        request.session['error_title'] = 'Game being created'
        request.session['error_description'] = (
            'The game you are trying to join, {0}, is already being created'
        ).format(gamename)
        # Without this redirect we would fall through and try to create a
        # player for a nonexistent game.
        return redirect('drawwrite:index')
# Create a player for that game. On error, add error objects to the
# session and redirect to index.
player = None
try:
player = services.new_player(game, username, True)
# TODO don't assume that all IntegrityError's mean that the user name is
# already taken. There are plenty of other explanations that I'm
# silencing by doing this.
except services.NameTaken as exception:
LOG.error('player name already taken')
request.session['error_title'] = 'Player name taken'
request.session['error_description'] = exception.message()
return redirect('drawwrite:index')
except IntegrityError:
LOG.error(__('a new game has an invalid player {0}', username))
request.session['error_title'] = 'Player name taken'
request.session['error_description'] = ' '.join((
'The player name that you entered, {0},'.format(username),
' is already taken for the game that you entered. Please',
'try a different one.',
))
return redirect('drawwrite:index')
# Redirect to that game's page.
LOG.debug('exiting create game view')
return redirect('drawwrite:play', player.pk)
# }}}
# play {{{
def play(request, player_id):
"""
The page on which players play the game.
"""
LOG.debug('enter play view')
# Get their player from the database using the id in the path. On error,
# set error session attributes and redirect to index.
player = None
try:
player = Player.objects.get(pk=player_id) #pylint: disable=no-member
except Player.DoesNotExist: #pylint: disable=no-member
LOG.error(__('non-existant player attempt: {0}', player_id))
request.session['error_title'] = 'Player Does Not Exist'
request.session['error_description'] = ' '.join((
            'You attempted to access a non-existent player. Please do not',
'do that.',
))
return redirect('drawwrite:index')
    LOG.debug(__('successfully retrieved player {0}', player_id))
# Get the game from the player object.
game = player.game
    LOG.debug(__('successfully retrieved game for player {0}', player_id))
# If the game hasn't started, show the player the waiting screen.
if not game.started:
LOG.debug(__('game for player {0} has not started', player_id))
# Get a list of all players in this game.
all_players = Player.objects.filter(game=game) #pylint: disable=no-member
LOG.debug(__('got players in game with player {0}', player_id))
# Get the creator of the game.
        creator = None
        # Use a separate loop variable so the requesting player is not shadowed.
        for other_player in all_players:
            if other_player.was_creator:
                creator = other_player
        LOG.debug(__('creator of game is {0}', creator.name))
# Render the waiting screen with all of those players.
LOG.debug(__('showing player {0} the waiting screen', player_id))
return render(request, 'drawwrite/waiting.html', {
'all_players' : all_players,
'player_id' : player_id,
'created' : player.was_creator,
'creator' : creator,
})
LOG.debug(__('game for player {0} has started', player_id))
# The game has started. Check if it's also finished.
if game.round_num >= game.num_players:
LOG.debug('game finished, redirect to view page')
return redirect('drawwrite:showGame', game.pk)
# The game has started, so decide whether to show the waiting page.
if player.current_round == game.round_num + 1:
# If the player's round equals the number of players in the game,
        # show the 'wait for game completion' page.
if player.current_round == player.game.num_players:
LOG.debug('show game finished waiting page')
return render(request, 'drawwrite/gameWaiting.html', {
'game_id' : game.pk,
})
# If the game isn't finished, show the waiting page for the next round.
LOG.debug('show waiting page, this user is done with current round')
return render(request, 'drawwrite/roundWaiting.html', {
'player_id' : player_id,
})
# If the player's round doesn't equal the game's round, something is fishy.
    elif player.current_round != game.round_num:
LOG.error(__(
'player {0} has round {1}, while game {2} has round {3}',
player_id,
player.current_round,
game.pk,
game.round_num,
))
# TODO come up with a better thing to show the user in this case
return HttpResponseBadRequest()
# Figure out which position's chain this player should have access to next.
chain_pos_to_get = (player.position + game.round_num) % game.num_players
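    # Worked example with illustrative numbers: with num_players == 4, the
    # player at position 1 edits chain 1 in round 0, chain 2 in round 1,
    # chain 3 in round 2, and wraps around to chain 0 in round 3, so every
    # player visits every chain exactly once.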
    LOG.debug(__("player {0} needs position {1}'s chain", player_id, chain_pos_to_get))
# Get the owner of the chain that player will edit.
chain_owner = None
try:
chain_owner = Player.objects.filter( #pylint: disable=no-member
game=game,
).get(
position=chain_pos_to_get,
)
except Player.DoesNotExist: #pylint: disable=no-member
LOG.error(__(
'player with game {0} and pos {1} does not exist',
game.pk,
chain_pos_to_get,
))
request.session['error_title'] = 'Player Does Not Exist'
request.session['error_description'] = ' '.join((
'You tried to get a player that does not exist. Sorry for',
'the inconvenience.',
))
return redirect('drawwrite:index')
LOG.debug(__('got chain_owner ({0}) for player {1}', chain_owner.pk, player_id))
# Get the chain for the player.
chain = None
try:
chain = Chain.objects.get(player=chain_owner) #pylint: disable=no-member
except Chain.DoesNotExist: #pylint: disable=no-member
        # Make a chain for this player (expected only on round 0, when the
        # chain owner is the requesting player themselves).
chain = services.new_chain(player)
LOG.debug(__('got chain for user {0}', player_id))
# If the chain has no links, show the player a screen to enter their first
# text link.
if chain.next_link_position == 0:
LOG.debug(__('returning page for first link for user {0}', player_id))
return render(request, 'drawwrite/chainAdd.html', {
'prev_link_type': '',
'prev_link': None,
'player_id': player_id,
})
# Figure out what type of link the player needs to make.
prev_link_pos = chain.next_link_position - 1
prev_link = None
prev_link_type = ''
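    # Links alternate by position: even link positions hold writing and odd
    # positions hold drawings, so the parity of the previous link decides
    # which model to query.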
if prev_link_pos % 2 == 0:
prev_link_type = 'write'
prev_link = WriteLink.objects.get( #pylint: disable=no-member
chain=chain,
link_position=prev_link_pos
)
else:
prev_link_type = 'draw'
prev_link = DrawLink.objects.get( #pylint: disable=no-member
chain=chain,
link_position=prev_link_pos
)
# Show the player a page to add the next link type.
LOG.debug('exit add to chain view')
return render(request, 'drawwrite/chainAdd.html', {
'prev_link_type': prev_link_type,
'prev_link': prev_link,
'player_id': player_id,
})
# }}}
# check_game_start {{{
def check_game_start(request, player_id): #pylint: disable=unused-argument
"""Check if the passed player's game has started."""
LOG.debug(__('checking game status for player {0}', player_id))
# Get the player.
player = None
try:
player = Player.objects.get(pk=player_id) #pylint: disable=no-member
except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('non-existent player: {0}', player_id))
return HttpResponseBadRequest()
LOG.debug(__('successfully found player {0}', player_id))
# If the player's game has not started, return an updated list of names.
if not player.game.started:
LOG.debug(__('player {0} game has not started', player_id))
# Get all the players in the game.
all_players = Player.objects.filter(game=player.game) #pylint: disable=no-member
LOG.debug(__('got all players in game with {0}', player_id))
# Create a list of all player names.
        names = [p.name for p in all_players]
LOG.debug('made list of all player names')
# Return the data we need.
return JsonResponse({'started': False, 'names': names})
# If the player's game has started, return an object indicating as much.
return JsonResponse({'started': True, 'names': []})
# }}}
# start_game {{{
def start_game(request, player_id):
"""Start the game of the player identified by player_id"""
LOG.debug(__('starting game of player {0}', player_id))
# Make sure method is POST.
    if request.method != 'POST':
LOG.error('attempted to GET to start game')
return HttpResponseBadRequest()
# Get the player.
player = None
try:
player = Player.objects.get(pk=player_id) #pylint: disable=no-member
except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('non-existent player {0}', player_id))
return HttpResponseBadRequest()
LOG.debug(__('successfully got player {0}', player_id))
# Set the player's game to 'started'.
services.start_game(player.game)
LOG.debug('set players game to started')
# Redirect to 'play'.
LOG.debug('redirecting to play')
return redirect('drawwrite:play', player_id)
# }}}
# create_link {{{
def create_link(request, player_id):
"""
Accept POST data and create a new link in the chain that player_id should
be adding to.
"""
LOG.debug(__('creating link for player {0}', player_id))
# Only accept POSTs
    if request.method != 'POST':
LOG.error('should have POSTed data')
return HttpResponseNotAllowed(['POST'])
LOG.debug(__('got POST data for player {0}', player_id))
# Get the player.
player = None
try:
player = Player.objects.get(pk=player_id) #pylint: disable=no-member
except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('non-existent player {0}', player_id))
request.session['error_title'] = 'Player Does Not Exist'
request.session['error_description'] = ' '.join((
'The player that you tried to create a link for does not exist.',
'We apologize for the inconvenience.',
))
return redirect('drawwrite:index')
LOG.debug(__('got the player with pk {0}', player_id))
# Calculate the position of the player that this player_id is adding to.
chain_owner_pos = (player.position + player.game.round_num) % player.game.num_players
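    # Same rotation as in the play view: offset the player's position by the
    # game's round number, modulo the player count, so the submission lands
    # on the chain the player is currently viewing.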
    LOG.debug(__('player {0} needs the chain at position {1}', player_id, chain_owner_pos))
# Get the owner of the chain this player is adding to.
try:
chain_owner = Player.objects.filter( #pylint: disable=no-member
game=player.game,
).get(
position=chain_owner_pos,
)
except Player.DoesNotExist: #pylint: disable=no-member
LOG.error(__(
'player with game {0} and position {1} does not exist',
player.game.pk,
chain_owner_pos,
))
request.session['error_title'] = 'Player Does Not Exist'
        request.session['error_description'] = ' '.join((
'You attempted to access a player that does not exist. We are',
'sorry for the inconvenience.',
))
return redirect('drawwrite:index')
LOG.debug(__('successfully got chain owner for player {0}', player_id))
# Get the player's chain.
chain = None
try:
chain = Chain.objects.get(player=chain_owner) #pylint: disable=no-member
except Chain.DoesNotExist: #pylint: disable=no-member
LOG.error(__('player {0} should have a chain but does not', player_id))
request.session['error_title'] = 'Player Has No Chain'
request.session['error_description'] = ' '.join((
'The player that you tried to create a link for does not have',
'a chain, but that should not be possible. We apologize for',
'the inconvenience.',
))
return redirect('drawwrite:index')
LOG.debug(__('got the chain for player with pk {0}', player_id))
# Figure out what type of link to make.
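    # Even next-link positions take a write link and odd ones take a draw
    # link, mirroring the parity convention used in the play view.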
if chain.next_link_position % 2 == 0:
# The POST data needs to have the 'description' field or something
# is wrong.
        if 'description' not in request.POST:
LOG.error(' '.join((
'should be making write link, but did not receive any',
'writing in the POSTed data',
)))
return HttpResponseBadRequest()
LOG.debug(__('making new write link for player {0}', player_id))
# Make the new write link.
services.new_write_link(chain, request.POST.get('description'), player)
else:
# The POST data needs to have the 'drawing' field or something
# is wrong.
        if 'drawing' not in request.POST:
LOG.error(' '.join((
'should be making a draw link, but did not receive any',
'drawing data in the POSTed data',
)))
return HttpResponseBadRequest()
LOG.debug('got image data to save')
# Make sure the data starts with 'data:image/png;base64,'
data_string = request.POST.get('drawing')
if not data_string.startswith('data:image/png;base64,'):
LOG.error(__('got bad image data: started with {0}', data_string[0:15]))
return HttpResponseBadRequest()
LOG.debug('got good(ish) image data')
# Shave off the stuff from above.
data_string = data_string.split(';base64,')[1]
LOG.debug('split off the ;base64, stuff')
# Decode the base64 data.
binary_data = b64decode(data_string)
LOG.debug('decoded base64 data')
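        # Illustrative example (made-up payload): the string
        # 'data:image/png;base64,iVBORw0KGgo...' splits into 'iVBORw0KGgo...',
        # which b64decode() turns into raw PNG bytes starting with b'\x89PNG'.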
# Make a file-like object out of the data.
file_name = "link-{0}-{1}.png".format(player_id, chain.next_link_position)
file_obj = ContentFile(binary_data, name=file_name)
LOG.debug(__('made file with name {0}', file_name))
# Make the draw link.
services.new_draw_link(chain, file_obj, player)
LOG.debug(__('created draw link, file has name {0}', file_name))
# Increase the 'num_players_finished_current_round' of this game.
services.player_finished(player)
# Redirect to 'play'.
return redirect('drawwrite:play', player_id)
# }}}
# check_round_done {{{
def check_round_done(request, player_id):
"""
    Check if the round of the current game is completed. Return a JSON object
    listing the names of the players who have not completed the round.
"""
LOG.debug(__('checking if round is completed for player {0}', player_id))
# Get the player.
player = None
try:
player = Player.objects.get(pk=player_id) #pylint: disable=no-member
except Player.DoesNotExist: #pylint: disable=no-member
LOG.error('attempted to get player that does not exist')
request.session['error_title'] = 'Player Does Not Exist'
request.session['error_description'] = ' '.join((
'The player that you attempted to get does not exist. We are',
'sorry for the inconvenience.',
))
return redirect('drawwrite:index')
LOG.debug(__('successfully got player {0}', player_id))
# Check if the game round equals the player's round. If so, then the
# player is allowed to move on. Otherwise, they're not.
if player.game.round_num == player.current_round:
LOG.debug('round is completed')
# Return an object saying that the round is done.
return JsonResponse({'finished': True})
LOG.debug('round is not completed')
# Get all players in the game who have not completed the
# current round.
try:
players_still_playing = Player.objects.filter( #pylint: disable=no-member
game=player.game,
).filter(
current_round__lt=player.current_round,
)
    except Exception as exception:
LOG.error(exception)
raise
LOG.debug('got list of players still playing')
# Turn the players into a list of names.
    names_still_playing = [p.name for p in players_still_playing]
LOG.debug('got list of names of players still playing')
# Return an object saying that the round is not done.
return JsonResponse({
'finished': False,
'still_playing': names_still_playing,
})
# }}}
# check_game_done {{{
def check_game_done(request, game_id): #pylint: disable=unused-argument
"""Check if the game with the passed game_id is finished."""
LOG.debug(__('checking if game {0} is done', game_id))
# Get the game.
game = None
try:
game = Game.objects.get(pk=game_id) #pylint: disable=no-member
except Game.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('tried to get non-existent game {0}', game_id))
        # TODO better error handling
        return HttpResponseBadRequest()
LOG.debug(__('got game {0}', game_id))
# Check if the round equals the number of players.
if game.round_num == game.num_players:
return JsonResponse({'finished': True})
# Get a list of players whose current round equals the game's round.
try:
players_still_playing = Player.objects.filter( #pylint: disable=no-member
game=game,
).filter(
current_round=game.round_num,
)
    except Exception as exception:
LOG.error(exception)
raise
LOG.debug('got list of players still playing')
# Turn that list of players into a list of names.
    names_still_playing = [p.name for p in players_still_playing]
LOG.debug('created list of names of players still playing')
# Return an object saying that the round is not done.
return JsonResponse({
'finished': False,
'still_playing': names_still_playing,
})
# }}}
# show_game {{{
def show_game(request, game_id):
"""Show a completed game."""
LOG.debug(__('showing game {0}', game_id))
# Get the game.
game = None
try:
game = Game.objects.get(pk=game_id) #pylint: disable=no-member
except Game.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('tried to get non-existent game {0}', game_id))
# TODO better error here
return HttpResponseBadRequest()
LOG.debug(__('got game {0}', game_id))
# Get all players associated with that game.
players = Player.objects.filter(game=game) #pylint: disable=no-member
# Render the game view page.
return render(request, 'drawwrite/game.html', {
'players': players,
'game_name': game.name,
})
# }}}
# show_chain {{{
def show_chain(request, player_id):
"""Show a completed chain."""
LOG.debug(__('showing chain of player {0}', player_id))
# Get the player.
player = None
try:
player = Player.objects.get(pk=player_id) #pylint: disable=no-member
except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('tried to get non-existent player {0}', player_id))
        # TODO better error message
return HttpResponseBadRequest()
LOG.debug(__('got player {0}', player_id))
# Get the chain.
chain = None
try:
chain = Chain.objects.get(player=player) #pylint: disable=no-member
except Chain.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('tried to get non-existent chain for player {0}', player_id))
# TODO better error message
return HttpResponseBadRequest()
LOG.debug(__('got chain for player {0}', player_id))
# Get all the write links and all the draw links.
write_links = WriteLink.objects.filter(chain=chain) #pylint: disable=no-member
draw_links = DrawLink.objects.filter(chain=chain) #pylint: disable=no-member
# Make a list of all the links in the chain.
links = []
for write, draw in zip_longest(write_links, draw_links):
if write is not None:
links.append(write)
if draw is not None:
links.append(draw)
LOG.debug(__('made list of all links for player {0}', player_id))
# Render the chain view.
return render(request, 'drawwrite/chain.html', {
'links': links,
'player': player,
})
# }}}
# get_available_games {{{
def get_available_games(request):
"""Return a list of game names that may be joined."""
being_created = Game.objects.filter(started=False) #pylint: disable=no-member
    options = [game.name for game in being_created]
LOG.debug('returning list of available games')
return JsonResponse({'options': options})
# }}}
|
{
"content_hash": "82947d3402cfd47239cb35d92a535201",
"timestamp": "",
"source": "github",
"line_count": 747,
"max_line_length": 96,
"avg_line_length": 37.24899598393574,
"alnum_prop": 0.6201617250673854,
"repo_name": "RMMoreton/drawwrite",
"id": "f29eb97bde521edd81ed384744a4767a2260435a",
"size": "27825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drawwritesite/drawwrite/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "23800"
},
{
"name": "JavaScript",
"bytes": "20544"
},
{
"name": "Python",
"bytes": "66448"
}
],
"symlink_target": ""
}
|
from position import Position
class Portfolio(object):
"""Portfolio class"""
instance = None
def __init__(self, list_of_positions, wallet_value):
        if Portfolio.instance is None:
            super(Portfolio, self).__init__()
            self.positions = list_of_positions
            # Position objects are not directly summable; total their values.
            self.value = sum(pos.get_value() for pos in self.positions)
            self.wallet_value = wallet_value
            Portfolio.instance = self
        else:
            # Raising a plain string is a TypeError in Python 3; use an exception type.
            raise RuntimeError("Portfolio already exists")
def add_position(self, new_pos):
self.positions.append(new_pos)
def get_total_value(self):
return sum([pos.get_value() for pos in self.positions])
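# A minimal usage sketch (the Position constructor arguments are hypothetical;
# this module only relies on positions exposing get_value()):
#
#     p = Position('AAPL', 10, 150.0)               # hypothetical arguments
#     portfolio = Portfolio([p], wallet_value=1000.0)
#     portfolio.add_position(Position('GOOG', 2, 2800.0))
#     total = portfolio.get_total_value()           # sums get_value() over positions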
|
{
"content_hash": "965b8330a63c19c08ab2be374f9e5651",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 57,
"avg_line_length": 24.434782608695652,
"alnum_prop": 0.702846975088968,
"repo_name": "coders-creed/botathon",
"id": "3e9598e73f3c6b38714b7e52724458f6c7423640",
"size": "711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/models/portfolio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20098"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import json
import warnings
from django import forms
from django.conf import settings
from django.contrib.admin.utils import (
display_for_field, flatten_fieldsets, help_text_for_field, label_for_field,
lookup_field,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.utils import flatatt
from django.template.defaultfilters import capfirst, linebreaksbr
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, ugettext_lazy as _
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
action = forms.ChoiceField(label=_('Action:'))
select_across = forms.BooleanField(
label='',
required=False,
initial=0,
widget=forms.HiddenInput({'class': 'select-across'}),
)
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
self.form, self.fieldsets = form, fieldsets
self.prepopulated_fields = [{
'field': form[field_name],
'dependencies': [form[f] for f in dependencies]
} for field_name, dependencies in prepopulated_fields.items()]
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(
self.form, name,
readonly_fields=self.readonly_fields,
model_admin=self.model_admin,
**options
)
@property
def errors(self):
return self.form.errors
@property
def non_field_errors(self):
return self.form.non_field_errors
@property
def media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
class Fieldset(object):
def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
description=None, model_admin=None):
self.form = form
self.name, self.fields = name, fields
self.classes = ' '.join(classes)
self.description = description
self.model_admin = model_admin
self.readonly_fields = readonly_fields
@property
def media(self):
if 'collapse' in self.classes:
extra = '' if settings.DEBUG else '.min'
js = [
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'collapse%s.js' % extra,
]
return forms.Media(js=['admin/js/%s' % url for url in js])
return forms.Media()
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__") or isinstance(field, six.text_type):
self.fields = [field]
else:
self.fields = field
self.has_visible_field = not all(
field in self.form.fields and self.form.fields[field].widget.is_hidden
for field in self.fields
)
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield AdminReadonlyField(self.form, field, is_first=(i == 0), model_admin=self.model_admin)
else:
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe(
'\n'.join(
self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields
).strip('\n')
)
class AdminField(object):
def __init__(self, form, field, is_first):
self.field = form[field] # A django.forms.BoundField instance
self.is_first = is_first # Whether this field is first on the line
self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
self.is_readonly = False
def label_tag(self):
classes = []
contents = conditional_escape(force_text(self.field.label))
if self.is_checkbox:
classes.append('vCheckboxLabel')
if self.field.field.required:
classes.append('required')
if not self.is_first:
classes.append('inline')
attrs = {'class': ' '.join(classes)} if classes else {}
# checkboxes should not have a label suffix as the checkbox appears
# to the left of the label.
return self.field.label_tag(
contents=mark_safe(contents), attrs=attrs,
label_suffix='' if self.is_checkbox else None,
)
def errors(self):
return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
def __init__(self, form, field, is_first, model_admin=None):
# Make self.field look a little bit like a field. This means that
# {{ field.name }} must be a useful class name to identify the field.
# For convenience, store other field-related data here too.
if callable(field):
class_name = field.__name__ if field.__name__ != '<lambda>' else ''
else:
class_name = field
if form._meta.labels and class_name in form._meta.labels:
label = form._meta.labels[class_name]
else:
label = label_for_field(field, form._meta.model, model_admin)
if form._meta.help_texts and class_name in form._meta.help_texts:
help_text = form._meta.help_texts[class_name]
else:
help_text = help_text_for_field(class_name, form._meta.model)
self.field = {
'name': class_name,
'label': label,
'help_text': help_text,
'field': field,
}
self.form = form
self.model_admin = model_admin
self.is_first = is_first
self.is_checkbox = False
self.is_readonly = True
self.empty_value_display = model_admin.get_empty_value_display()
def label_tag(self):
attrs = {}
if not self.is_first:
attrs["class"] = "inline"
label = self.field['label']
return format_html('<label{}>{}:</label>',
flatatt(attrs),
capfirst(force_text(label)))
def contents(self):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
try:
f, attr, value = lookup_field(field, obj, model_admin)
except (AttributeError, ValueError, ObjectDoesNotExist):
result_repr = self.empty_value_display
else:
if f is None:
boolean = getattr(attr, "boolean", False)
if boolean:
result_repr = _boolean_icon(value)
else:
if hasattr(value, "__html__"):
result_repr = value
else:
result_repr = force_text(value)
if getattr(attr, "allow_tags", False):
warnings.warn(
"Deprecated allow_tags attribute used on %s. "
"Use django.utils.html.format_html(), format_html_join(), "
"or django.utils.safestring.mark_safe() instead." % attr,
RemovedInDjango20Warning
)
result_repr = mark_safe(value)
else:
result_repr = linebreaksbr(result_repr)
else:
if isinstance(f.remote_field, ManyToManyRel) and value is not None:
result_repr = ", ".join(map(six.text_type, value.all()))
else:
result_repr = display_for_field(value, f, self.empty_value_display)
result_repr = linebreaksbr(result_repr)
return conditional_escape(result_repr)
class InlineAdminFormSet(object):
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
readonly_fields=None, model_admin=None):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
if prepopulated_fields is None:
prepopulated_fields = {}
self.prepopulated_fields = prepopulated_fields
self.classes = ' '.join(inline.classes) if inline.classes else ''
def __iter__(self):
for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
view_on_site_url = self.opts.get_view_on_site_url(original)
yield InlineAdminForm(
self.formset, form, self.fieldsets, self.prepopulated_fields,
original, self.readonly_fields, model_admin=self.opts,
view_on_site_url=view_on_site_url,
)
for form in self.formset.extra_forms:
yield InlineAdminForm(
self.formset, form, self.fieldsets, self.prepopulated_fields,
None, self.readonly_fields, model_admin=self.opts,
)
yield InlineAdminForm(
self.formset, self.formset.empty_form,
self.fieldsets, self.prepopulated_fields, None,
self.readonly_fields, model_admin=self.opts,
)
def fields(self):
fk = getattr(self.formset, "fk", None)
for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)):
if fk and fk.name == field_name:
continue
if field_name in self.readonly_fields:
yield {
'label': label_for_field(field_name, self.opts.model, self.opts),
'widget': {'is_hidden': False},
'required': False,
'help_text': help_text_for_field(field_name, self.opts.model),
}
else:
form_field = self.formset.empty_form.fields[field_name]
label = form_field.label
if label is None:
label = label_for_field(field_name, self.opts.model, self.opts)
yield {
'label': label,
'widget': form_field.widget,
'required': form_field.required,
'help_text': form_field.help_text,
}
def inline_formset_data(self):
verbose_name = self.opts.verbose_name
return json.dumps({
'name': '#%s' % self.formset.prefix,
'options': {
'prefix': self.formset.prefix,
'addText': ugettext('Add another %(verbose_name)s') % {
'verbose_name': capfirst(verbose_name),
},
'deleteText': ugettext('Remove'),
}
})
@property
def forms(self):
return self.formset.forms
@property
def non_form_errors(self):
return self.formset.non_form_errors
@property
def media(self):
media = self.opts.media + self.formset.media
for fs in self:
media = media + fs.media
return media
class InlineAdminForm(AdminForm):
"""
A wrapper around an inline form for use in the admin system.
"""
def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
readonly_fields=None, model_admin=None, view_on_site_url=None):
self.formset = formset
self.model_admin = model_admin
self.original = original
self.show_url = original and view_on_site_url is not None
self.absolute_url = view_on_site_url
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields, readonly_fields, model_admin)
def __iter__(self):
for name, options in self.fieldsets:
yield InlineFieldset(
self.formset, self.form, name, self.readonly_fields,
model_admin=self.model_admin, **options
)
def needs_explicit_pk_field(self):
# Auto fields are editable (oddly), so need to check for auto or non-editable pk
if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable:
return True
# Also search any parents for an auto field. (The pk info is propagated to child
# models so that does not need to be checked in parents.)
for parent in self.form._meta.model._meta.get_parent_list():
if parent._meta.has_auto_field:
return True
return False
def pk_field(self):
return AdminField(self.form, self.formset._pk_field.name, False)
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
def deletion_field(self):
from django.forms.formsets import DELETION_FIELD_NAME
return AdminField(self.form, DELETION_FIELD_NAME, False)
def ordering_field(self):
from django.forms.formsets import ORDERING_FIELD_NAME
return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
def __init__(self, formset, *args, **kwargs):
self.formset = formset
super(InlineFieldset, self).__init__(*args, **kwargs)
def __iter__(self):
fk = getattr(self.formset, "fk", None)
for field in self.fields:
if fk and fk.name == field:
continue
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class AdminErrorList(forms.utils.ErrorList):
"""
Stores all errors for the form/formsets in an add/change stage view.
"""
def __init__(self, form, inline_formsets):
super(AdminErrorList, self).__init__()
if form.is_bound:
self.extend(form.errors.values())
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(errors_in_inline_form.values())
|
{
"content_hash": "8391f44a47f579d216a01b3f6de42ddd",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 113,
"avg_line_length": 37.79259259259259,
"alnum_prop": 0.5797073043251013,
"repo_name": "nemesisdesign/django",
"id": "c433f32579b44a420b8e5b5cb8fdd4a7b77da6d0",
"size": "15306",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "django/contrib/admin/helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53138"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12145639"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import json
from exam import fixture
from sentry.testutils import TestCase
class GroupEventJsonTest(TestCase):
@fixture
def path(self):
return '/{}/{}/issues/{}/events/{}/json/'.format(
self.organization.slug,
self.project.slug,
self.group.id,
self.event.id,
)
def test_does_render(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert resp['Content-Type'] == 'application/json'
data = json.loads(resp.content)
assert data['id'] == self.event.event_id
|
{
"content_hash": "efa5bfe234e706b74124f173db4e7412",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 57,
"avg_line_length": 25.807692307692307,
"alnum_prop": 0.6020864381520119,
"repo_name": "mitsuhiko/sentry",
"id": "c9acc576d3a075e1f0454996f75253d8aec280c4",
"size": "671",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sentry/web/frontend/test_group_event_json.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "171113"
},
{
"name": "Python",
"bytes": "877258"
}
],
"symlink_target": ""
}
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views.generic import DetailView, RedirectView, TemplateView
from apps.webshop.forms import OrderForm
from apps.webshop.models import Category, Order, OrderLine, Product, ProductSize
class LoginRequiredMixin:
@classmethod
def as_view(cls, **initkwargs):
view = super(LoginRequiredMixin, cls).as_view(**initkwargs)
return login_required(view)
class CartMixin:
def get_context_data(self, **kwargs):
context = super(CartMixin, self).get_context_data(**kwargs)
context['order_line'] = self.current_order_line()
return context
def current_order_line(self):
if not self.request.user.is_authenticated:
return None
order_line = OrderLine.objects.filter(user=self.request.user, paid=False).first()
return order_line
class BreadCrumb:
"""Dynamically generated breadcrumbs using name and url"""
def get_breadcrumbs(self):
"""Create breadcrumb for the main webshop page
Returns:
list: list of breadcrumbs
"""
breadcrumbs = [{'name': 'Webshop', 'url': reverse_lazy('webshop_home')}]
return breadcrumbs
def get_context_data(self, **kwargs):
"""Add breadcrumbs to context"""
context = super().get_context_data(**kwargs)
context['breadcrumbs'] = self.get_breadcrumbs()
return context
class WebshopMixin(CartMixin, BreadCrumb):
pass
class Home(WebshopMixin, TemplateView):
template_name = 'webshop/base.html'
def get_breadcrumbs(self):
return None
def get_context_data(self, **kwargs):
context = super(Home, self).get_context_data(**kwargs)
context['products'] = Product.objects.filter(active=True)
return context
class CategoryDetail(WebshopMixin, DetailView):
model = Category
context_object_name = 'category'
template_name = 'webshop/category.html'
def get_breadcrumbs(self):
breadcrumbs = super().get_breadcrumbs()
breadcrumbs.append({'name': self.get_object()})
return breadcrumbs
class ProductDetail(WebshopMixin, DetailView):
model = Product
context_object_name = 'product'
template_name = 'webshop/product.html'
def get_breadcrumbs(self):
breadcrumbs = super().get_breadcrumbs()
breadcrumbs.append({'name': self.get_object()})
return breadcrumbs
def get_context_data(self, **kwargs):
context = super(ProductDetail, self).get_context_data(**kwargs)
context['orderform'] = OrderForm
context['sizes'] = ProductSize.objects.filter(product=self.get_object())
return context
@method_decorator(login_required)
def post(self, request, *args, **kwargs):
product = self.get_object()
if product.deadline and product.deadline < timezone.now():
messages.error(request, "Dette produktet er ikke lenger tilgjengelig.")
return super(ProductDetail, self).get(request, *args, **kwargs)
if not product.in_stock():
messages.error(request, "Dette produktet er utsolgt.")
return super().get(request, *args, **kwargs)
form = OrderForm(request.POST)
if form.is_valid():
order_line = self.current_order_line()
if not order_line:
order_line = OrderLine.objects.create(user=self.request.user)
size = form.cleaned_data['size']
quantity = form.cleaned_data['quantity']
if not product.enough_stock(quantity, size):
messages.error(request, "Det er ikke nok produkter på lageret.")
return super().get(request, *args, **kwargs)
# Checking if product has already been added to cart
order = order_line.orders.filter(product=product, size=size).first()
if order:
# Adding to existing order
order.quantity += quantity
else:
# Creating new order
order = Order(
product=product, price=product.price,
quantity=quantity,
size=size,
order_line=order_line)
order.save()
return redirect('webshop_checkout')
else:
messages.error(request, 'Vennligst oppgi et gyldig antall')
return super(ProductDetail, self).get(request, *args, **kwargs)
class Checkout(LoginRequiredMixin, WebshopMixin, TemplateView):
template_name = 'webshop/checkout.html'
def get_breadcrumbs(self):
breadcrumbs = super().get_breadcrumbs()
breadcrumbs.append({'name': 'Sjekk ut'})
return breadcrumbs
def get(self, request, *args, **kwargs):
order_line = self.current_order_line()
if order_line:
invalid_orders = order_line.orders.filter(Q(product__active=False) |
Q(product__deadline__lt=timezone.now()) |
Q(product__stock=0))
self.remove_inactive_orders(invalid_orders)
return super(Checkout, self).get(request, *args, **kwargs)
def remove_inactive_orders(self, orders):
for order in orders:
            if order.product.stock == 0:
                message = ("Det er ingen {} på lager og varen er fjernet "
                           "fra din handlevogn.").format(order.product.name)
            else:
                message = ("{} er ikke lenger tilgjengelig for kjøp og "
                           "er fjernet fra din handlevogn.").format(order.product.name)
messages.add_message(self.request, messages.INFO, message)
order.delete()
class RemoveOrder(LoginRequiredMixin, WebshopMixin, RedirectView):
pattern_name = 'webshop_checkout'
def post(self, request, *args, **kwargs):
order_line = self.current_order_line()
order_id = request.POST.get('id')
if order_id:
Order.objects.filter(order_line=order_line, id=order_id).delete()
else:
Order.objects.filter(order_line=order_line).delete()
return super(RemoveOrder, self).post(request, *args, **kwargs)
|
{
"content_hash": "4366650427cbe886df202a5b89133cc3",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 95,
"avg_line_length": 36.36871508379888,
"alnum_prop": 0.6219662058371735,
"repo_name": "dotKom/onlineweb4",
"id": "90d3021503f79a362f7e0382aa0a0f8a4de4ef39",
"size": "6537",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/webshop/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "71414"
},
{
"name": "HTML",
"bytes": "463894"
},
{
"name": "JavaScript",
"bytes": "745404"
},
{
"name": "Python",
"bytes": "925584"
},
{
"name": "Shell",
"bytes": "3130"
},
{
"name": "Standard ML",
"bytes": "1088"
}
],
"symlink_target": ""
}
|
"""Transport adapter for Requests."""
from __future__ import absolute_import
import functools
import logging
import numbers
import os
import time
try:
import requests
except ImportError as caught_exc: # pragma: NO COVER
import six
six.raise_from(
ImportError(
"The requests library is not installed, please install the "
"requests package to use the requests transport."
),
caught_exc,
)
import requests.adapters # pylint: disable=ungrouped-imports
import requests.exceptions # pylint: disable=ungrouped-imports
from requests.packages.urllib3.util.ssl_ import (
create_urllib3_context,
) # pylint: disable=ungrouped-imports
import six # pylint: disable=ungrouped-imports
from google.auth import environment_vars
from google.auth import exceptions
from google.auth import transport
import google.auth.transport._mtls_helper
from google.oauth2 import service_account
_LOGGER = logging.getLogger(__name__)
_DEFAULT_TIMEOUT = 120 # in seconds
class _Response(transport.Response):
"""Requests transport response adapter.
Args:
response (requests.Response): The raw Requests response.
"""
def __init__(self, response):
self._response = response
@property
def status(self):
return self._response.status_code
@property
def headers(self):
return self._response.headers
@property
def data(self):
return self._response.content
class TimeoutGuard(object):
"""A context manager raising an error if the suite execution took too long.
Args:
timeout (Union[None, Union[float, Tuple[float, float]]]):
The maximum number of seconds a suite can run without the context
manager raising a timeout exception on exit. If passed as a tuple,
the smaller of the values is taken as a timeout. If ``None``, a
timeout error is never raised.
timeout_error_type (Optional[Exception]):
The type of the error to raise on timeout. Defaults to
:class:`requests.exceptions.Timeout`.
"""
def __init__(self, timeout, timeout_error_type=requests.exceptions.Timeout):
self._timeout = timeout
self.remaining_timeout = timeout
self._timeout_error_type = timeout_error_type
def __enter__(self):
self._start = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_value:
return # let the error bubble up automatically
if self._timeout is None:
return # nothing to do, the timeout was not specified
elapsed = time.time() - self._start
deadline_hit = False
if isinstance(self._timeout, numbers.Number):
self.remaining_timeout = self._timeout - elapsed
deadline_hit = self.remaining_timeout <= 0
else:
self.remaining_timeout = tuple(x - elapsed for x in self._timeout)
deadline_hit = min(self.remaining_timeout) <= 0
if deadline_hit:
raise self._timeout_error_type()
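# A minimal usage sketch for TimeoutGuard (the 5.0 second budget and the
# helper names are illustrative, not part of this module):
#
#     remaining = 5.0
#     with TimeoutGuard(remaining) as guard:
#         first_network_call()              # hypothetical helper
#         remaining = guard.remaining_timeout
#     with TimeoutGuard(remaining) as guard:
#         second_network_call()             # hypothetical helper
#
# Each guard deducts its elapsed time from the shared budget, so the calls
# together cannot exceed the original allowance without raising Timeout.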
class Request(transport.Request):
"""Requests request adapter.
This class is used internally for making requests using various transports
in a consistent way. If you use :class:`AuthorizedSession` you do not need
to construct or use this class directly.
This class can be useful if you want to manually refresh a
:class:`~google.auth.credentials.Credentials` instance::
import google.auth.transport.requests
import requests
request = google.auth.transport.requests.Request()
credentials.refresh(request)
Args:
        session (requests.Session): An instance of :class:`requests.Session` used
to make HTTP requests. If not specified, a session will be created.
.. automethod:: __call__
"""
def __init__(self, session=None):
if not session:
session = requests.Session()
self.session = session
def __call__(
self,
url,
method="GET",
body=None,
headers=None,
timeout=_DEFAULT_TIMEOUT,
**kwargs
):
"""Make an HTTP request using requests.
Args:
url (str): The URI to be requested.
method (str): The HTTP method to use for the request. Defaults
to 'GET'.
body (bytes): The payload or body in HTTP request.
headers (Mapping[str, str]): Request headers.
timeout (Optional[int]): The number of seconds to wait for a
response from the server. If not specified or if None, the
requests default timeout will be used.
kwargs: Additional arguments passed through to the underlying
requests :meth:`~requests.Session.request` method.
Returns:
google.auth.transport.Response: The HTTP response.
Raises:
google.auth.exceptions.TransportError: If any exception occurred.
"""
try:
_LOGGER.debug("Making request: %s %s", method, url)
response = self.session.request(
method, url, data=body, headers=headers, timeout=timeout, **kwargs
)
return _Response(response)
except requests.exceptions.RequestException as caught_exc:
new_exc = exceptions.TransportError(caught_exc)
six.raise_from(new_exc, caught_exc)
class _MutualTlsAdapter(requests.adapters.HTTPAdapter):
"""
A TransportAdapter that enables mutual TLS.
Args:
cert (bytes): client certificate in PEM format
key (bytes): client private key in PEM format
Raises:
ImportError: if certifi or pyOpenSSL is not installed
OpenSSL.crypto.Error: if client cert or key is invalid
"""
def __init__(self, cert, key):
import certifi
from OpenSSL import crypto
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
ctx_poolmanager = create_urllib3_context()
ctx_poolmanager.load_verify_locations(cafile=certifi.where())
ctx_poolmanager._ctx.use_certificate(x509)
ctx_poolmanager._ctx.use_privatekey(pkey)
self._ctx_poolmanager = ctx_poolmanager
ctx_proxymanager = create_urllib3_context()
ctx_proxymanager.load_verify_locations(cafile=certifi.where())
ctx_proxymanager._ctx.use_certificate(x509)
ctx_proxymanager._ctx.use_privatekey(pkey)
self._ctx_proxymanager = ctx_proxymanager
super(_MutualTlsAdapter, self).__init__()
def init_poolmanager(self, *args, **kwargs):
kwargs["ssl_context"] = self._ctx_poolmanager
super(_MutualTlsAdapter, self).init_poolmanager(*args, **kwargs)
def proxy_manager_for(self, *args, **kwargs):
kwargs["ssl_context"] = self._ctx_proxymanager
return super(_MutualTlsAdapter, self).proxy_manager_for(*args, **kwargs)
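# A minimal usage sketch (assumes cert_bytes and key_bytes already hold PEM
# data; both names are illustrative):
#
#     session = requests.Session()
#     session.mount("https://", _MutualTlsAdapter(cert_bytes, key_bytes))
#
# Once mounted, every https:// request made through the session presents the
# client certificate during the TLS handshake.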
class AuthorizedSession(requests.Session):
"""A Requests Session class with credentials.
This class is used to perform requests to API endpoints that require
authorization::
from google.auth.transport.requests import AuthorizedSession
authed_session = AuthorizedSession(credentials)
response = authed_session.request(
'GET', 'https://www.googleapis.com/storage/v1/b')
The underlying :meth:`request` implementation handles adding the
credentials' headers to the request and refreshing credentials as needed.
This class also supports mutual TLS via :meth:`configure_mtls_channel`
method. In order to use this method, the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
environment variable must be explicitly set to ``true``, otherwise it does
    nothing. Assuming the environment variable is set to ``true``, the method behaves in the
following manner:
If client_cert_callback is provided, client certificate and private
key are loaded using the callback; if client_cert_callback is None,
application default SSL credentials will be used. Exceptions are raised if
there are problems with the certificate, private key, or the loading process,
so it should be called within a try/except block.
First we set the environment variable to ``true``, then create an :class:`AuthorizedSession`
instance and specify the endpoints::
regular_endpoint = 'https://pubsub.googleapis.com/v1/projects/{my_project_id}/topics'
mtls_endpoint = 'https://pubsub.mtls.googleapis.com/v1/projects/{my_project_id}/topics'
authed_session = AuthorizedSession(credentials)
Now we can pass a callback to :meth:`configure_mtls_channel`::
def my_cert_callback():
# some code to load client cert bytes and private key bytes, both in
# PEM format.
some_code_to_load_client_cert_and_key()
if loaded:
return cert, key
raise MyClientCertFailureException()
# Always call configure_mtls_channel within a try/except block.
try:
authed_session.configure_mtls_channel(my_cert_callback)
except:
# handle exceptions.
if authed_session.is_mtls:
response = authed_session.request('GET', mtls_endpoint)
else:
response = authed_session.request('GET', regular_endpoint)
You can alternatively use application default SSL credentials like this::
try:
authed_session.configure_mtls_channel()
except:
# handle exceptions.
Args:
credentials (google.auth.credentials.Credentials): The credentials to
add to the request.
refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
that credentials should be refreshed and the request should be
retried.
max_refresh_attempts (int): The maximum number of times to attempt to
refresh the credentials and retry the request.
refresh_timeout (Optional[int]): The timeout value in seconds for
credential refresh HTTP requests.
auth_request (google.auth.transport.requests.Request):
(Optional) An instance of
:class:`~google.auth.transport.requests.Request` used when
refreshing credentials. If not passed,
an instance of :class:`~google.auth.transport.requests.Request`
is created.
default_host (Optional[str]): A host like "pubsub.googleapis.com".
This is used when a self-signed JWT is created from service
account credentials.
"""
def __init__(
self,
credentials,
refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,
max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS,
refresh_timeout=None,
auth_request=None,
default_host=None,
):
super(AuthorizedSession, self).__init__()
self.credentials = credentials
self._refresh_status_codes = refresh_status_codes
self._max_refresh_attempts = max_refresh_attempts
self._refresh_timeout = refresh_timeout
self._is_mtls = False
self._default_host = default_host
if auth_request is None:
auth_request_session = requests.Session()
# Using an adapter to make HTTP requests robust to network errors.
            # This adapter retries HTTP requests when network errors occur
            # and the request seems safely retryable.
retry_adapter = requests.adapters.HTTPAdapter(max_retries=3)
auth_request_session.mount("https://", retry_adapter)
# Do not pass `self` as the session here, as it can lead to
# infinite recursion.
auth_request = Request(auth_request_session)
# Request instance used by internal methods (for example,
# credentials.refresh).
self._auth_request = auth_request
# https://google.aip.dev/auth/4111
# Attempt to use self-signed JWTs when a service account is used.
# A default host must be explicitly provided.
if (
isinstance(self.credentials, service_account.Credentials)
and self._default_host
):
self.credentials._create_self_signed_jwt(
"https://{}/".format(self._default_host)
)
def configure_mtls_channel(self, client_cert_callback=None):
"""Configure the client certificate and key for SSL connection.
The function does nothing unless `GOOGLE_API_USE_CLIENT_CERTIFICATE` is
explicitly set to `true`. In this case if client certificate and key are
successfully obtained (from the given client_cert_callback or from application
default SSL credentials), a :class:`_MutualTlsAdapter` instance will be mounted
to "https://" prefix.
Args:
client_cert_callback (Optional[Callable[[], (bytes, bytes)]]):
The optional callback returns the client certificate and private
key bytes both in PEM format.
If the callback is None, application default SSL credentials
will be used.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
creation failed for any reason.
"""
use_client_cert = os.getenv(
environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
)
if use_client_cert != "true":
self._is_mtls = False
return
try:
import OpenSSL
except ImportError as caught_exc:
new_exc = exceptions.MutualTLSChannelError(caught_exc)
six.raise_from(new_exc, caught_exc)
try:
(
self._is_mtls,
cert,
key,
) = google.auth.transport._mtls_helper.get_client_cert_and_key(
client_cert_callback
)
if self._is_mtls:
mtls_adapter = _MutualTlsAdapter(cert, key)
self.mount("https://", mtls_adapter)
except (
exceptions.ClientCertError,
ImportError,
OpenSSL.crypto.Error,
) as caught_exc:
new_exc = exceptions.MutualTLSChannelError(caught_exc)
six.raise_from(new_exc, caught_exc)
def request(
self,
method,
url,
data=None,
headers=None,
max_allowed_time=None,
timeout=_DEFAULT_TIMEOUT,
**kwargs
):
"""Implementation of Requests' request.
Args:
timeout (Optional[Union[float, Tuple[float, float]]]):
The amount of time in seconds to wait for the server response
with each individual request. Can also be passed as a tuple
``(connect_timeout, read_timeout)``. See :meth:`requests.Session.request`
documentation for details.
max_allowed_time (Optional[float]):
If the method runs longer than this, a ``Timeout`` exception is
automatically raised. Unlike the ``timeout`` parameter, this
value applies to the total method execution time, even if
multiple requests are made under the hood.
Mind that it is not guaranteed that the timeout error is raised
at ``max_allowed_time``. It might take longer, for example, if
an underlying request takes a lot of time, but the request
itself does not timeout, e.g. if a large file is being
                transmitted. The timeout error will be raised after such
request completes.
"""
# pylint: disable=arguments-differ
# Requests has a ton of arguments to request, but only two
# (method, url) are required. We pass through all of the other
# arguments to super, so no need to exhaustively list them here.
# Use a kwarg for this instead of an attribute to maintain
# thread-safety.
_credential_refresh_attempt = kwargs.pop("_credential_refresh_attempt", 0)
# Make a copy of the headers. They will be modified by the credentials
# and we want to pass the original headers if we recurse.
request_headers = headers.copy() if headers is not None else {}
# Do not apply the timeout unconditionally in order to not override the
# _auth_request's default timeout.
auth_request = (
self._auth_request
if timeout is None
else functools.partial(self._auth_request, timeout=timeout)
)
remaining_time = max_allowed_time
with TimeoutGuard(remaining_time) as guard:
self.credentials.before_request(auth_request, method, url, request_headers)
remaining_time = guard.remaining_timeout
with TimeoutGuard(remaining_time) as guard:
response = super(AuthorizedSession, self).request(
method,
url,
data=data,
headers=request_headers,
timeout=timeout,
**kwargs
)
remaining_time = guard.remaining_timeout
# If the response indicated that the credentials needed to be
# refreshed, then refresh the credentials and re-attempt the
# request.
# A stored token may expire between the time it is retrieved and
# the time the request is made, so we may need to try twice.
if (
response.status_code in self._refresh_status_codes
and _credential_refresh_attempt < self._max_refresh_attempts
):
_LOGGER.info(
"Refreshing credentials due to a %s response. Attempt %s/%s.",
response.status_code,
_credential_refresh_attempt + 1,
self._max_refresh_attempts,
)
# Do not apply the timeout unconditionally in order to not override the
# _auth_request's default timeout.
auth_request = (
self._auth_request
if timeout is None
else functools.partial(self._auth_request, timeout=timeout)
)
with TimeoutGuard(remaining_time) as guard:
self.credentials.refresh(auth_request)
remaining_time = guard.remaining_timeout
# Recurse. Pass in the original headers, not our modified set, but
# do pass the adjusted max allowed time (i.e. the remaining total time).
return self.request(
method,
url,
data=data,
headers=headers,
max_allowed_time=remaining_time,
timeout=timeout,
_credential_refresh_attempt=_credential_refresh_attempt + 1,
**kwargs
)
return response
@property
def is_mtls(self):
"""Indicates if the created SSL channel is mutual TLS."""
return self._is_mtls
|
{
"content_hash": "65e28b2c116894137d14d89d36393513",
"timestamp": "",
"source": "github",
"line_count": 525,
"max_line_length": 96,
"avg_line_length": 36.92761904761905,
"alnum_prop": 0.6239232475370093,
"repo_name": "luci/luci-py",
"id": "d317544b74f718d0a819c224e63a1e034b576e15",
"size": "19963",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "client/third_party/google/auth/transport/requests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5576"
},
{
"name": "HTML",
"bytes": "1900972"
},
{
"name": "JavaScript",
"bytes": "113046"
},
{
"name": "Makefile",
"bytes": "11718"
},
{
"name": "Python",
"bytes": "5885612"
},
{
"name": "Shell",
"bytes": "5183"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import get_user_model
from django.core import mail
from django.urls import reverse
from django.test import TestCase
from ..models import Job
from ..factories import (
ApprovedJobFactory, DraftJobFactory, JobCategoryFactory, JobTypeFactory,
ReviewJobFactory, JobsBoardAdminGroupFactory,
)
from users.factories import UserFactory
class JobsViewTests(TestCase):
def setUp(self):
self.user = UserFactory(password='password')
self.user2 = UserFactory(password='password')
self.staff = UserFactory(
password='password',
is_staff=True,
groups=[JobsBoardAdminGroupFactory()],
)
self.job_category = JobCategoryFactory(
name='Game Production',
slug='game-production'
)
self.job_type = JobTypeFactory(
name='FrontEnd Developer',
slug='frontend-developer'
)
self.job = ApprovedJobFactory(
description='Lorem ipsum dolor sit amet',
category=self.job_category,
city='Memphis',
region='TN',
country='USA',
email='hr@company.com',
is_featured=True,
telecommuting=True,
creator=self.user,
)
self.job.job_types.add(self.job_type)
self.job_draft = DraftJobFactory(
description='Lorem ipsum dolor sit amet',
category=self.job_category,
city='Memphis',
region='TN',
country='USA',
email='hr@company.com',
is_featured=True,
creator=self.user,
)
self.job_draft.job_types.add(self.job_type)
def test_job_list(self):
url = reverse('jobs:job_list')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'jobs/base.html')
self.assertTemplateUsed(response, 'jobs/job_list.html')
url = reverse('jobs:job_list_type', kwargs={'slug': self.job_type.slug})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 1)
self.assertTemplateUsed(response, 'jobs/base.html')
self.assertTemplateUsed(response, 'jobs/job_list.html')
url = reverse('jobs:job_list_category', kwargs={'slug': self.job_category.slug})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 1)
self.assertTemplateUsed(response, 'jobs/base.html')
self.assertTemplateUsed(response, 'jobs/job_list.html')
url = reverse('jobs:job_list_location', kwargs={'slug': self.job.location_slug})
response = self.client.get(url)
self.assertEqual(len(response.context['object_list']), 1)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'jobs/base.html')
self.assertTemplateUsed(response, 'jobs/job_list.html')
def test_job_list_mine(self):
url = reverse('jobs:job_list_mine')
response = self.client.get(url)
self.assertRedirects(response, '{}?next={}'.format(reverse('account_login'), url))
username = 'kevinarnold'
email = 'kevinarnold@example.com'
password = 'secret'
User = get_user_model()
creator = User.objects.create_user(username, email, password)
self.job = ApprovedJobFactory(
description='My job listing',
category=self.job_category,
city='Memphis',
region='TN',
country='USA',
email='hr@company.com',
creator=creator,
is_featured=True
)
self.client.login(username=username, password=password)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 1)
self.assertEqual(response.context['jobs_count'], 2)
self.assertTemplateUsed(response, 'jobs/base.html')
self.assertTemplateUsed(response, 'jobs/job_list.html')
def test_job_mine_remove(self):
url = reverse('jobs:job_list_mine')
self.client.login(username=self.user.username, password='password')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
job = response.context['object_list'][0]
self.assertNotEqual(job.status, job.STATUS_REMOVED)
url = reverse('jobs:job_remove', kwargs={'pk': job.pk})
response = self.client.get(url)
self.assertRedirects(response, reverse('jobs:job_list_mine'))
job.refresh_from_db()
self.assertEqual(job.status, job.STATUS_REMOVED)
def test_job_mine_remove_404(self):
url = reverse('jobs:job_list_mine')
self.client.login(username=self.user2.username, password='password')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 0)
self.assertNotEqual(self.job.status, self.job.STATUS_REMOVED)
url = reverse('jobs:job_remove', kwargs={'pk': self.job.pk})
response = self.client.get(url)
self.assertRedirects(response, reverse('jobs:job_list_mine'))
self.assertNotEqual(self.job.status, self.job.STATUS_REMOVED)
def test_job_mine_remove_post_request(self):
url = reverse('jobs:job_remove', kwargs={'pk': self.job.pk})
self.client.login(username=self.user.username, password='password')
response = self.client.post(url)
self.assertEqual(response.status_code, 405)
def test_job_mine_remove_login(self):
url = reverse('jobs:job_remove', kwargs={'pk': self.job.pk})
response = self.client.get(url)
self.assertRedirects(
response,
'/accounts/login/?next=/jobs/%d/remove/' % self.job.pk
)
def test_disallow_editing_approved_jobs(self):
self.client.login(username=self.user.username, password='password')
url = reverse('jobs:job_edit', kwargs={'pk': self.job.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_disallow_previewing_approved_jobs(self):
self.client.login(username=self.user.username, password='password')
url = reverse('jobs:job_preview', kwargs={'pk': self.job.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_job_edit(self):
username = 'kevinarnold'
email = 'kevinarnold@example.com'
password = 'secret'
User = get_user_model()
creator = User.objects.create_user(username, email, password)
job = DraftJobFactory(
description='My job listing',
category=self.job_category,
city='Memphis',
region='TN',
country='USA',
email='hr@company.com',
creator=creator,
is_featured=True
)
job.job_types.add(self.job_type)
self.client.login(username=username, password=password)
url = reverse('jobs:job_edit', kwargs={'pk': job.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'jobs/base.html')
        # Edit the job. Job.editable must return True for the job
        # to be editable.
form = response.context['form']
data = form.initial
# Quoted from Django 1.10 release notes:
# Private API django.forms.models.model_to_dict() returns a
# queryset rather than a list of primary keys for ManyToManyFields.
data['job_types'] = [self.job_type.pk]
data['description'] = 'Lorem ipsum dolor sit amet'
response = self.client.post(url, data)
self.assertRedirects(response, '/jobs/%d/preview/' % job.pk)
edited_job = Job.objects.get(pk=job.pk)
self.assertEqual(edited_job.description.raw, 'Lorem ipsum dolor sit amet')
self.client.logout()
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/accounts/login/?next=/jobs/%d/edit/' % job.pk)
        # Staff can see the edit form.
self.client.login(username=self.staff.username, password='password')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_job_detail(self):
url = self.job.get_absolute_url()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['jobs_count'], 1)
self.assertTemplateUsed(response, 'jobs/base.html')
        # Logged-out users cannot see the job details.
url = self.job_draft.get_absolute_url()
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# Creator can see their own jobs no matter the status.
self.client.login(username=self.user.username, password='password')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
        # And other users can see other users' approved jobs.
self.client.logout()
self.client.login(username=self.user2.username, password='password')
response = self.client.get(self.job.get_absolute_url())
self.assertEqual(response.status_code, 200)
# Try to reach a job that doesn't exist.
url = reverse('jobs:job_detail', kwargs={'pk': 999999})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_job_detail_security(self):
"""
Ensure the public can only see approved jobs, but staff can view
all jobs
"""
response = self.client.get(self.job.get_absolute_url())
self.assertEqual(response.status_code, 200)
# Normal users can't see non-approved Jobs
response = self.client.get(self.job_draft.get_absolute_url())
self.assertEqual(response.status_code, 404)
# Staff can see everything
self.client.login(username=self.staff.username, password='password')
response = self.client.get(self.job.get_absolute_url())
self.assertEqual(response.status_code, 200)
response = self.client.get(self.job_draft.get_absolute_url())
self.assertEqual(response.status_code, 200)
def test_job_create(self):
mail.outbox = []
url = reverse('jobs:job_create')
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/accounts/login/?next=/jobs/create/')
post_data = {
'category': self.job_category.pk,
'job_types': [self.job_type.pk],
'company_name': 'Some Company',
'company_description': 'Some Description',
'job_title': 'Test Job',
'city': 'San Diego',
'region': 'CA',
'country': 'USA',
'description': 'Lorem ipsum dolor sit amet',
'requirements': 'Some requirements',
'email': 'hr@company.com',
'url': 'https://jobs.company.com',
}
# Check that anonymous posting is not allowed. See #852.
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/accounts/login/?next=/jobs/create/')
# Now test job submitted by logged in user
post_data['company_name'] = 'Other Studio'
username = 'kevinarnold'
email = 'kevinarnold@example.com'
password = 'secret'
User = get_user_model()
creator = User.objects.create_user(username, email, password)
self.client.login(username=creator.username, password='secret')
response = self.client.post(url, post_data, follow=True)
# Job was saved in draft mode
jobs = Job.objects.filter(company_name='Other Studio')
self.assertEqual(len(jobs), 1)
job = jobs[0]
preview_url = reverse('jobs:job_preview', kwargs={'pk': job.pk})
self.assertRedirects(response, preview_url)
self.assertNotEqual(job.created, None)
self.assertNotEqual(job.updated, None)
self.assertEqual(job.creator, creator)
self.assertEqual(job.status, 'draft')
self.assertEqual(len(mail.outbox), 0)
# Submit again to save
response = self.client.post(preview_url, {'action': 'review'})
# Job was now moved to review status
job = Job.objects.get(pk=job.pk)
self.assertEqual(job.status, 'review')
# One email was sent
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
f"Job Submitted for Approval: {job.display_name}"
)
del mail.outbox[:]
def test_job_preview_404(self):
url = reverse('jobs:job_preview', kwargs={'pk': 9999999})
        # /jobs/<pk>/preview/ requires the user to be logged in.
self.client.login(username=self.user.username, password='password')
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_job_create_prepopulate_email(self):
create_url = reverse('jobs:job_create')
user_data = {
'username': 'phrasebook',
'email': 'hungarian@example.com',
'password': 'hovereel',
}
User = get_user_model()
creator = User.objects.create_user(**user_data)
# Logged in, email address is prepopulated.
self.client.login(username=user_data['username'],
password=user_data['password'])
        response = self.client.get(create_url)
        self.assertEqual(response.status_code, 200)
        # The form should come back prepopulated with the user's email.
        self.assertContains(response, user_data['email'])
def test_job_types(self):
job_type2 = JobTypeFactory(
name='Senior Developer',
slug='senior-developer'
)
url = reverse('jobs:job_types')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn(self.job_type, response.context['types'])
self.assertNotIn(job_type2, response.context['types'])
def test_job_categories(self):
job_category2 = JobCategoryFactory(
name='Web Development',
slug='web-development'
)
url = reverse('jobs:job_categories')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn(self.job_category, response.context['categories'])
self.assertNotIn(job_category2, response.context['categories'])
def test_job_locations(self):
job2 = ReviewJobFactory(
description='Lorem ipsum dolor sit amet',
category=self.job_category,
city='Lawrence',
region='KS',
country='USA',
email='hr@company.com',
)
job2.job_types.add(self.job_type)
url = reverse('jobs:job_locations')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn(self.job, response.context['jobs'])
self.assertNotIn(job2, response.context['jobs'])
content = str(response.content)
self.assertIn('Memphis', content)
self.assertNotIn('Lawrence', content)
def test_job_telecommute(self):
url = reverse('jobs:job_telecommute')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertIn(self.job, response.context['jobs'])
def test_job_display_name(self):
self.assertEqual(self.job.display_name,
f"{self.job.job_title}, {self.job.company_name}")
self.job.company_name = 'ABC'
self.assertEqual(self.job.display_name,
f"{self.job.job_title}, {self.job.company_name}")
self.job.company_name = ''
self.assertEqual(self.job.display_name,
f"{self.job.job_title}, {self.job.company_name}")
def test_job_display_about(self):
self.job.company_description.raw = 'XYZ'
self.assertEqual(self.job.display_description.raw, self.job.company_description.raw)
self.job.company_description = ' '
self.assertEqual(self.job.display_description.raw, self.job.company_description.raw)
def test_job_list_type_404(self):
url = reverse('jobs:job_list_type', kwargs={'slug': 'invalid-type'})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_job_list_category_404(self):
url = reverse('jobs:job_list_category', kwargs={'slug': 'invalid-type'})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
class JobsReviewTests(TestCase):
def setUp(self):
self.super_username = 'kevinarnold'
self.super_email = 'kevinarnold@example.com'
self.super_password = 'secret'
self.creator_username = 'johndoe'
self.creator_email = 'johndoe@example.com'
self.creator_password = 'secret'
self.contact = 'John Doe'
self.another_username = 'another'
self.another_email = 'another@example.com'
self.another_password = 'secret'
User = get_user_model()
self.creator = User.objects.create_user(
self.creator_username,
self.creator_email,
self.creator_password
)
self.superuser = User.objects.create_superuser(
self.super_username,
self.super_email,
self.super_password
)
self.another = User.objects.create_user(
self.another_username,
self.another_email,
self.another_password
)
self.job_category = JobCategoryFactory(
name='Game Production',
slug='game-production'
)
self.job_type = JobTypeFactory(
name='FrontEnd Developer',
slug='frontend-developer'
)
self.job1 = ReviewJobFactory(
company_name='Kulfun Games',
description='Lorem ipsum dolor sit amet',
category=self.job_category,
city='Memphis',
region='TN',
country='USA',
email=self.creator.email,
creator=self.creator,
contact=self.contact
)
self.job1.job_types.add(self.job_type)
self.job2 = ReviewJobFactory(
company_name='Kulfun Games',
description='Lorem ipsum dolor sit amet',
category=self.job_category,
city='Memphis',
region='TN',
country='USA',
email=self.creator.email,
creator=self.creator,
contact=self.contact
)
self.job2.job_types.add(self.job_type)
self.job3 = ReviewJobFactory(
company_name='Kulfun Games',
description='Lorem ipsum dolor sit amet',
category=self.job_category,
city='Memphis',
region='TN',
country='USA',
email=self.creator.email,
creator=self.creator,
contact=self.contact
)
self.job3.job_types.add(self.job_type)
def test_moderate(self):
url = reverse('jobs:job_moderate')
job = ApprovedJobFactory()
response = self.client.get(url)
self.assertRedirects(response, '{}?next={}'.format(reverse('account_login'), url))
self.client.login(username=self.another_username, password=self.another_password)
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.assertTemplateUsed(response, '403.html')
self.client.logout()
self.client.login(username=self.super_username, password=self.super_password)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 1)
self.assertIn(job, response.context['object_list'])
self.assertNotIn(self.job1, response.context['object_list'])
def test_moderate_search(self):
url = reverse('jobs:job_moderate')
job = ApprovedJobFactory(job_title='foo')
job2 = ApprovedJobFactory(job_title='bar foo')
self.client.login(username=self.super_username, password=self.super_password)
response = self.client.get(url, {'term': 'foo'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 2)
self.assertIn(job, response.context['object_list'])
self.assertIn(job2, response.context['object_list'])
def test_job_review(self):
        # FIXME: refactor into separate test cases for clarity?
mail.outbox = []
url = reverse('jobs:job_review')
response = self.client.get(url)
self.assertRedirects(response, '{}?next={}'.format(reverse('account_login'), url))
self.client.login(username=self.another_username, password=self.another_password)
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.assertTemplateUsed(response, '403.html')
self.client.logout()
self.client.login(username=self.super_username, password=self.super_password)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 3)
self.assertIn(self.job1, response.context['object_list'])
self.assertIn(self.job2, response.context['object_list'])
self.assertIn(self.job3, response.context['object_list'])
# no email notifications sent before offer is approved
self.assertEqual(len(mail.outbox), 0)
self.client.post(url, data={'job_id': self.job1.pk, 'action': 'approve'})
j1 = Job.objects.get(pk=self.job1.pk)
self.assertEqual(j1.status, Job.STATUS_APPROVED)
        # exactly one approval notification email should be sent
        # to the offer creator
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.to, [self.creator.email, 'jobs@python.org'])
self.assertIn(self.contact, message.body)
mail.outbox = []
# no email notifications sent before offer is rejected
self.assertEqual(len(mail.outbox), 0)
self.client.post(url, data={'job_id': self.job2.pk, 'action': 'reject'})
j2 = Job.objects.get(pk=self.job2.pk)
self.assertEqual(j2.status, Job.STATUS_REJECTED)
        # exactly one rejection notification email should be sent
        # to the offer creator
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.to, [self.creator.email, 'jobs@python.org'])
self.assertIn(self.contact, message.body)
mail.outbox = []
response = self.client.post(url, data={'job_id': self.job2.pk, 'action': 'archive'})
self.assertRedirects(response, reverse('jobs:job_review'))
j2 = Job.objects.get(pk=self.job2.pk)
self.assertEqual(j2.status, Job.STATUS_ARCHIVED)
self.client.post(url, data={'job_id': self.job3.pk, 'action': 'remove'})
j3 = Job.objects.get(pk=self.job3.pk)
self.assertEqual(j3.status, Job.STATUS_REMOVED)
response = self.client.post(url, data={'job_id': 999999, 'action': 'approve'})
self.assertEqual(response.status_code, 302)
# Invalid action should raise a 404 error.
response = self.client.post(url, data={'job_id': self.job2.pk, 'action': 'invalid'})
self.assertEqual(response.status_code, 404)
def test_job_comment(self):
mail.outbox = []
self.client.login(username=self.creator_username, password=self.creator_password)
url = reverse('jobs:job_review_comment_create')
form_data = {
'job': self.job1.pk,
'comment': 'Lorem ispum',
}
self.assertEqual(len(mail.outbox), 0)
response = self.client.post(url, form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
# We should only send an email to jobs@p.o.
self.assertEqual(mail.outbox[0].to, ['jobs@python.org'])
self.assertIn('Dear Python Job Board Admin,', mail.outbox[0].body)
self.client.logout()
# Send a comment as a jobs board admin.
mail.outbox = []
self.client.login(username=self.super_username, password=self.super_password)
self.assertEqual(len(mail.outbox), 0)
response = self.client.post(url, form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
# We should send an email to both jobs@p.o and job submitter.
self.assertEqual(mail.outbox[0].to, ['jobs@python.org', self.creator_email])
self.assertIn(
'There is a new review comment available for your job posting.',
mail.outbox[0].body
)
def test_job_comment_401(self):
mail.outbox = []
self.client.login(username=self.another_username, password=self.another_password)
url = reverse('jobs:job_review_comment_create')
form_data = {
'job': self.job1.pk,
'comment': 'Foooo',
}
self.assertEqual(len(mail.outbox), 0)
response = self.client.post(url, form_data)
self.assertEqual(response.status_code, 401)
self.assertEqual(len(mail.outbox), 0)
def test_job_comment_401_approve(self):
mail.outbox = []
self.client.login(username=self.creator_username, password=self.creator_password)
url = reverse('jobs:job_review_comment_create')
form_data = {
'job': self.job1.pk,
'action': 'approve',
}
self.assertEqual(len(mail.outbox), 0)
response = self.client.post(url, form_data)
self.assertEqual(response.status_code, 401)
self.assertEqual(len(mail.outbox), 0)
def test_job_comment_approve(self):
mail.outbox = []
self.client.login(username=self.super_username, password=self.super_password)
url = reverse('jobs:job_review_comment_create')
form_data = {
'job': self.job1.pk,
'action': 'approve',
}
self.assertEqual(len(mail.outbox), 0)
response = self.client.post(url, form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [self.creator.email, 'jobs@python.org'])
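# Running just this module (illustrative; assumes the project's standard
# Django settings are configured):
#   python manage.py test jobs.tests.test_views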
|
{
"content_hash": "639c1511b63b200b372c38d189344686",
"timestamp": "",
"source": "github",
"line_count": 717,
"max_line_length": 92,
"avg_line_length": 37.642956764295675,
"alnum_prop": 0.6167098925527973,
"repo_name": "python/pythondotorg",
"id": "763dca666b23e9d809c97e5705b3f29a9390b59b",
"size": "26990",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "jobs/tests/test_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7686"
},
{
"name": "Dockerfile",
"bytes": "229"
},
{
"name": "HTML",
"bytes": "498813"
},
{
"name": "JavaScript",
"bytes": "24050"
},
{
"name": "Makefile",
"bytes": "1615"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Procfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "1145343"
},
{
"name": "Ruby",
"bytes": "1464"
},
{
"name": "SCSS",
"bytes": "198033"
}
],
"symlink_target": ""
}
|
"""Helper methods for creating & verifying XSRF tokens."""
__authors__ = [
'"Doug Coker" <dcoker@google.com>',
'"Joe Gregorio" <jcgregorio@google.com>',
]
import base64
import hmac
import os # for urandom
import time
from oauth2client import util
# Delimiter character
DELIMITER = ':'
# 1 hour in seconds
DEFAULT_TIMEOUT_SECS = 1*60*60
@util.positional(2)
def generate_token(key, user_id, action_id="", when=None):
"""Generates a URL-safe token for the given user, action, time tuple.
Args:
key: secret key to use.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
when: the time in seconds since the epoch at which the user was
authorized for this action. If not set the current time is used.
Returns:
A string XSRF protection token.
"""
when = when or int(time.time())
digester = hmac.new(key)
digester.update(str(user_id))
digester.update(DELIMITER)
digester.update(action_id)
digester.update(DELIMITER)
digester.update(str(when))
digest = digester.digest()
token = base64.urlsafe_b64encode('%s%s%d' % (digest,
DELIMITER,
when))
return token
@util.positional(3)
def validate_token(key, token, user_id, action_id="", current_time=None):
"""Validates that the given token authorizes the user for the action.
Tokens are invalid if the time of issue is too old or if the token
  does not match what generate_token outputs (i.e. the token was forged).
Args:
key: secret key to use.
token: a string of the token generated by generateToken.
user_id: the user ID of the authenticated user.
    action_id: a string identifier of the action they requested
      authorization for.
    current_time: the time in seconds since the epoch used to check expiry;
      defaults to the current time (useful for testing).
Returns:
A boolean - True if the user is authorized for the action, False
otherwise.
"""
if not token:
return False
try:
decoded = base64.urlsafe_b64decode(str(token))
token_time = long(decoded.split(DELIMITER)[-1])
except (TypeError, ValueError):
return False
if current_time is None:
current_time = time.time()
# If the token is too old it's not valid.
if current_time - token_time > DEFAULT_TIMEOUT_SECS:
return False
# The given token should match the generated one with the same time.
expected_token = generate_token(key, user_id, action_id=action_id,
when=token_time)
if len(token) != len(expected_token):
return False
# Perform constant time comparison to avoid timing attacks
different = 0
for x, y in zip(token, expected_token):
different |= ord(x) ^ ord(y)
if different:
return False
return True
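# --- Usage sketch: a minimal round-trip (illustrative; assumes a Python 2
# runtime, matching this module's use of `long` and byte strings) ---
if __name__ == '__main__':
  _key = os.urandom(16)
  _token = generate_token(_key, 42, action_id='delete')
  assert validate_token(_key, _token, 42, action_id='delete')
  assert not validate_token(_key, _token, 43, action_id='delete')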
|
{
"content_hash": "acd025535f27009b42102d5ffa96ed74",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 73,
"avg_line_length": 29.193877551020407,
"alnum_prop": 0.6452289409297448,
"repo_name": "emedinaa/contentbox",
"id": "d51be1e08f5081d5f2304feff78eed5436e53352",
"size": "3481",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/oauth2client/xsrfutil.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "420520"
},
{
"name": "HTML",
"bytes": "54100"
},
{
"name": "JavaScript",
"bytes": "1778"
},
{
"name": "Python",
"bytes": "49359"
},
{
"name": "Ruby",
"bytes": "413"
}
],
"symlink_target": ""
}
|
"""
/hosts endpoint for Daisy v1 API
"""
import subprocess
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from collections import Counter
from webob.exc import HTTPServerError
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for hosts resource in Daisy v1 API
The hosts resource API is a RESTful web service for host data. The API
is as follows::
        GET /nodes -- Returns a set of brief metadata about hosts
        GET /nodes/detail -- Returns a set of detailed metadata about
                      hosts
HEAD /nodes/<ID> -- Return metadata about an host with id <ID>
GET /nodes/<ID> -- Return host data for host with id <ID>
POST /nodes -- Store host data and return metadata about the
newly-stored host
PUT /nodes/<ID> -- Update host metadata and/or upload host
data for a previously-reserved host
DELETE /nodes/<ID> -- Delete the host with id <ID>
"""
support_resource_type = ['baremetal', 'server', 'docker']
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _raise_404_if_network_deleted(self, req, network_id):
network = self.get_network_meta_or_404(req, network_id)
if network is None or network['deleted']:
msg = _("Network with identifier %s has been deleted.") % network_id
raise HTTPNotFound(msg)
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster is None or cluster['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % cluster_id
raise HTTPNotFound(msg)
def _raise_404_if_role_deleted(self, req, role_id):
role = self.get_role_meta_or_404(req, role_id)
if role is None or role['deleted']:
msg = _("Cluster with identifier %s has been deleted.") % role_id
raise HTTPNotFound(msg)
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
        for param in SUPPORTED_PARAMS:
            if param in req.params:
                params[param] = req.params.get(param)
return params
@utils.mutating
def add_host(self, req, host_meta):
"""
Adds a new host to Daisy
:param req: The WSGI/Webob Request object
        :param host_meta: Mapping of metadata about host
:raises HTTPBadRequest if x-host-name is missing
"""
        # If the host was updated in '_verify_interface_among_hosts', there is
        # no need to continue with the add.
cluster_id = host_meta.get('cluster', None)
if self._verify_interface_among_hosts(req, cluster_id, host_meta):
return {'host_meta': host_meta}
self._enforce(req, 'add_host')
if host_meta.has_key('resource_type'):
if host_meta['resource_type'] not in self.support_resource_type:
msg = "resource type is not supported, please use it in %s" % self.support_resource_type
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
else:
host_meta['resource_type'] = 'baremetal'
if cluster_id:
self.get_cluster_meta_or_404(req, cluster_id)
if host_meta.has_key('role') and host_meta['role']:
role_id_list = []
            host_roles = []
if host_meta.has_key('cluster'):
params = self._get_query_params(req)
role_list = registry.get_roles_detail(req.context, **params)
for role_name in role_list:
if role_name['cluster_id'] == host_meta['cluster']:
host_roles = list(eval(host_meta['role']))
for host_role in host_roles:
if role_name['name'] == host_role:
role_id_list.append(role_name['id'])
continue
if len(role_id_list) != len(host_roles):
msg = "The role of params %s is not exist, please use the right name" % host_roles
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
host_meta['role'] = role_id_list
else:
msg = "cluster params is none"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
assigned_networks = {}
if host_meta.has_key('interfaces'):
network_id_list = []
orig_keys = list(eval(host_meta['interfaces']))
for network in orig_keys:
if network.has_key('is_deployment'):
if network['is_deployment'] == "True" or network['is_deployment'] == True:
network['is_deployment'] = 1
else:
network['is_deployment'] = 0
if network.has_key('assigned_networks') and network['assigned_networks'] != [''] and network['assigned_networks']:
if host_meta.has_key('cluster'):
network_list = registry.get_networks_detail(req.context, host_meta['cluster'])
for network_name in list(network['assigned_networks']):
                            length = len(network_id_list)
for network_info in network_list:
if network_name == network_info['name']:
network_id_list.append(network_info['id'])
if network_info.get('id', None) \
and network_info.get('name', None) \
and network.get('name', None):
assigned_networks[network_info['id']] = \
[network_info.get('name', None), network.get('name', None)]
                            if length == len(network_id_list):
                                msg = "The network %s does not exist, please use the right name" % network_name
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
network['assigned_networks'] = network_id_list
# by cluster id and network_name search network table
registry.update_phyname_of_network(req.context, assigned_networks)
else:
msg = "cluster params is none"
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if network.has_key('mac') and network.has_key('ip'):
host_infos = registry.get_host_interface(req.context, host_meta)
for host_info in host_infos:
if host_info.has_key('host_id'):
host_meta["id"] = host_info['host_id']
if host_meta.has_key('os_status'):
if host_meta['os_status'] not in ['init', 'installing', 'active', 'failed', 'none']:
msg = "os_status is not valid."
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
if host_meta.has_key('ipmi_addr') and host_meta['ipmi_addr']:
if not host_meta.has_key('ipmi_user'):
host_meta['ipmi_user'] = 'zteroot'
if not host_meta.has_key('ipmi_passwd'):
host_meta['ipmi_passwd'] = 'superuser'
host_meta = registry.add_host_metadata(req.context, host_meta)
return {'host_meta': host_meta}
@utils.mutating
def delete_host(self, req, id):
"""
Deletes a host from Daisy.
:param req: The WSGI/Webob Request object
        :param id: The opaque host identifier
:raises HTTPBadRequest if x-host-name is missing
"""
self._enforce(req, 'delete_host')
try:
registry.delete_host_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find host to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete host: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("Host %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('host.delete', host)
return Response(body='', status=200)
@utils.mutating
def get_host(self, req, id):
"""
        Returns metadata about a host in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque host identifier
:raises HTTPNotFound if host metadata is not available to user
"""
self._enforce(req, 'get_host')
host_meta = self.get_host_meta_or_404(req, id)
return {'host_meta': host_meta}
def detail(self, req):
"""
Returns detailed information for all available nodes
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'nodes': [
{'id': <ID>,
'name': <NAME>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
self._enforce(req, 'get_hosts')
params = self._get_query_params(req)
try:
nodes = registry.get_hosts_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(nodes=nodes)
    def _verify_interface_in_same_host(self, interfaces, id=None):
"""
        Verify the interfaces within the input host.
        :param interfaces: host interface info
:return:
"""
# verify interface among the input host
interfaces = eval(interfaces)
same_mac_list = [interface1['name']
for interface1 in interfaces for interface2 in interfaces
if interface1.get('name', None) and interface1.get('mac', None) and
interface2.get('name', None) and interface2.get('mac', None) and
interface1.get('type', None) and interface2.get('type', None) and
interface1['name'] != interface2['name'] and interface1['mac'] == interface2['mac']
and interface1['type'] != "bond" and interface2['type'] != "bond"]
        # Notice: if interfaces sharing the same 'mac' are illegal, delete code
        # block #1 and raise an exception in the 'if' block instead.
        # This code block only verifies, as an early warning.
if same_mac_list:
msg = "%s%s" % ("" if not id else "Host id:%s." % id,
"The nic name of interface [%s] with same mac,please check!" %
",".join(same_mac_list))
LOG.warn(msg)
# 1-----------------------------------------------------------------
# if interface with same 'pci', raise exception
same_pci_list = [interface1['name']
for interface1 in interfaces for interface2 in interfaces
if interface1.get('name', None) and interface1.get('pci', None) and
interface2.get('name', None) and interface2.get('pci', None) and
interface1.get('type', None) and interface2.get('type', None) and
interface1['name'] != interface2['name'] and interface1['pci'] == interface2['pci']
and interface1['type'] != "bond" and interface2['type'] != "bond"]
if same_pci_list:
msg = "The nic name of interface [%s] with same pci,please check!" % ",".join(same_pci_list)
raise HTTPForbidden(explanation = msg)
# 1-----------------------------------------------------------------
def _verify_interface_among_hosts(self, req, cluster_id, host_meta):
"""
Verify interface among the hosts in cluster
:param req:
:param cluster_id:
:param host_meta:
:return:
"""
        # If True, the host needs an update rather than an add, and the update succeeded.
host_is_update = False
if not host_meta.get('interfaces', None):
return host_is_update
self._verify_interface_in_same_host(host_meta['interfaces'])
# host pxe interface info
interfaces = eval(host_meta['interfaces'])
input_host_pxe_info = [interface
for interface in interfaces
if interface.get('is_deployment', None) == "True"]
        # By default we expect there to be only one pxe interface.
        # If there is more than one, an exception is raised.
if not input_host_pxe_info:
LOG.info("<<<The host %s don't have pxe interface.>>>" % host_meta.get('name', None))
return host_is_update
        if len(input_host_pxe_info) > 1:
            msg = "There is more than one pxe nic on the same host, which isn't allowed."
            raise HTTPServerError(explanation=msg)
if not cluster_id:
return host_is_update
# verify interface between exist host and input host in cluster
list_params = {
'sort_key': u'name',
'sort_dir': u'asc',
'limit': u'20',
'filters': {u'cluster_id': cluster_id}
}
exist_nodes = registry.get_hosts_detail(req.context, **list_params)
input_host_pxe_info = input_host_pxe_info[0]
for exist_node in exist_nodes:
id = exist_node.get('id', None)
exist_node_info = self.get_host(req, id).get('host_meta', None)
if not exist_node_info.get('interfaces', None):
continue
for interface in exist_node_info['interfaces']:
if interface.get('mac', None) != input_host_pxe_info.get('mac', None):
continue
if exist_node.get('dmi_uuid', None) != host_meta.get('dmi_uuid', None):
msg = "The 'mac' of host interface is exist in db, but 'dmi_uuid' is different." \
"We think you want update the host, but the host can't find."
raise HTTPForbidden(explanation = msg)
host_meta['id'] = id
host_meta['cluster_id'] = id
self.update_host(req, id, host_meta)
LOG.info("<<<FUN:verify_interface, host:%s is already update.>>>" % id)
host_is_update = True
return host_is_update
def _get_swap_lv_size_m(self, memory_size_m):
if memory_size_m <= 4096:
swap_lv_size_m = 4096
elif memory_size_m <= 16384:
swap_lv_size_m = 8192
elif memory_size_m <= 65536:
swap_lv_size_m = 32768
else:
swap_lv_size_m = 65536
return swap_lv_size_m
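    # Example swap sizing from the branches above (illustrative): 2048M RAM ->
    # 4096M swap; 8192M -> 8192M; 32768M -> 32768M; 131072M -> 65536M.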
@utils.mutating
def update_host(self, req, id, host_meta):
"""
Updates an existing host with the registry.
        :param req: The WSGI/Webob Request object
        :param id: The opaque host identifier
        :retval Returns the updated host information as a mapping
"""
if host_meta.get('interfaces', None):
self._verify_interface_in_same_host(host_meta['interfaces'], id)
self._enforce(req, 'update_host')
orig_host_meta = self.get_host_meta_or_404(req, id)
        # Do not allow any updates on a deleted host.
        # Fix for LP Bug #1060930
if orig_host_meta['deleted']:
msg = _("Forbidden to update deleted host.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
if host_meta.has_key('cluster'):
self.get_cluster_meta_or_404(req, host_meta['cluster'])
if (host_meta.has_key('resource_type') and
host_meta['resource_type'] not in self.support_resource_type):
msg = "resource type is not supported, please use it in %s" % self.support_resource_type
raise HTTPNotFound(msg)
if orig_host_meta.get('disks',None):
if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active':
host_meta['root_disk'] = orig_host_meta['root_disk']
else:
if host_meta.get('root_disk',None):
root_disk = host_meta['root_disk']
elif orig_host_meta.get('root_disk',None):
root_disk = str(orig_host_meta['root_disk'])
else:
host_meta['root_disk'] = 'sda'
root_disk = host_meta['root_disk']
if root_disk not in orig_host_meta['disks'].keys():
msg = "There is no disk named %s" % root_disk
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
else:
host_meta['root_disk'] = orig_host_meta['root_disk']
if orig_host_meta.get('disks',None):
if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active':
host_meta['root_lv_size'] = orig_host_meta['root_lv_size']
else:
if host_meta.get('root_lv_size',None):
root_lv_size = host_meta['root_lv_size']
elif orig_host_meta.get('root_lv_size',None):
root_lv_size = str(orig_host_meta['root_lv_size'])
else:
host_meta['root_lv_size'] = '51200'
root_lv_size = host_meta['root_lv_size']
if root_lv_size.isdigit():
                    root_lv_size = int(root_lv_size)
root_disk_storage_size_b_str = str(orig_host_meta['disks']['%s' %root_disk]['size'])
root_disk_storage_size_b_int = int(root_disk_storage_size_b_str.strip().split()[0])
root_disk_storage_size_m = root_disk_storage_size_b_int//(1024*1024)
                    boot_partition_m = 400
                    redundant_partition_m = 100
                    free_root_disk_storage_size_m = root_disk_storage_size_m - boot_partition_m - redundant_partition_m
if (root_lv_size/4)*4 > free_root_disk_storage_size_m:
msg = "root_lv_size of %s is larger than the free_root_disk_storage_size."%orig_host_meta['id']
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
if (root_lv_size/4)*4 < 51200:
msg = "root_lv_size of %s is too small ,it must be larger than 51200M."%orig_host_meta['id']
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
else:
msg = (_("root_lv_size of %s is wrong,please input a number and it must be positive number") %orig_host_meta['id'])
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
else:
host_meta['root_lv_size'] = orig_host_meta['root_lv_size']
if orig_host_meta.get('disks',None):
if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active':
host_meta['swap_lv_size'] = orig_host_meta['swap_lv_size']
else:
if host_meta.get('swap_lv_size',None):
swap_lv_size = host_meta['swap_lv_size']
elif orig_host_meta.get('swap_lv_size',None):
swap_lv_size = str(orig_host_meta['swap_lv_size'])
else:
if not orig_host_meta.get('memory',None):
msg = "there is no memory in %s" %orig_host_meta['id']
raise HTTPNotFound(msg)
memory_size_b_str = str(orig_host_meta['memory']['total'])
memory_size_b_int = int(memory_size_b_str.strip().split()[0])
memory_size_m = memory_size_b_int//1024
swap_lv_size_m = self._get_swap_lv_size_m(memory_size_m)
host_meta['swap_lv_size'] = str(swap_lv_size_m)
swap_lv_size = host_meta['swap_lv_size']
if swap_lv_size.isdigit():
                    swap_lv_size = int(swap_lv_size)
disk_storage_size_b = 0
for key in orig_host_meta['disks']:
                        storage_size_str = orig_host_meta['disks'][key]['size']
                        storage_size_b_int = int(storage_size_str.strip().split()[0])
                        disk_storage_size_b = disk_storage_size_b + storage_size_b_int
disk_storage_size_m = disk_storage_size_b/(1024*1024)
                    boot_partition_m = 400
                    redundant_partition_m = 100
                    free_disk_storage_size_m = disk_storage_size_m - boot_partition_m - redundant_partition_m - (root_lv_size/4)*4
if (swap_lv_size/4)*4 > free_disk_storage_size_m:
msg = "swap_lv_size of %s is larger than the free_disk_storage_size."%orig_host_meta['id']
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
if (swap_lv_size/4)*4 < 2000:
msg = "swap_lv_size of %s is too small ,it must be larger than 2000M."%orig_host_meta['id']
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
else:
msg = (_("swap_lv_size of %s is wrong,please input a number and it must be positive number") %orig_host_meta['id'])
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
else:
host_meta['swap_lv_size'] = orig_host_meta['swap_lv_size']
if orig_host_meta.get('disks',None):
if not host_meta.get('root_pwd',None) and not orig_host_meta.get('root_pwd',None):
host_meta['root_pwd'] = 'ossdbg1'
else:
host_meta['root_pwd'] = orig_host_meta['root_pwd']
if host_meta.has_key('role'):
role_id_list = []
if host_meta.has_key('cluster'):
params = self._get_query_params(req)
role_list = registry.get_roles_detail(req.context, **params)
host_roles = list()
for role_name in role_list:
if role_name['cluster_id'] == host_meta['cluster']:
host_roles = list(eval(host_meta['role']))
for host_role in host_roles:
if role_name['name'] == host_role:
role_id_list.append(role_name['id'])
continue
if len(role_id_list) != len(host_roles) and host_meta['role'] != u"[u'']":
msg = "The role of params %s is not exist, please use the right name" % host_roles
raise HTTPNotFound(msg)
host_meta['role'] = role_id_list
else:
msg = "cluster params is none"
raise HTTPNotFound(msg)
assigned_networks = {}
if host_meta.has_key('interfaces'):
network_id_list = []
orig_keys = list(eval(host_meta['interfaces']))
for network in orig_keys:
if network.has_key('is_deployment'):
if network['is_deployment'] == "True" or network['is_deployment'] == True:
network['is_deployment'] = 1
else:
network['is_deployment'] = 0
if network.has_key('assigned_networks') and network['assigned_networks'] != [''] and network['assigned_networks']:
if host_meta.has_key('cluster'):
network_list = registry.get_networks_detail(req.context, host_meta['cluster'])
for network_name in list(network['assigned_networks']):
                        length = len(network_id_list)
for network_info in network_list:
if network_name == network_info['name']:
network_id_list.append(network_info['id'])
if network_info.get('id', None) \
and network_info.get('name', None) \
and network.get('name', None):
assigned_networks[network_info['id']] = \
[network_info.get('name', None), network.get('name', None)]
                            if length == len(network_id_list):
                                msg = "The network %s does not exist, please use the right name" % network_name
raise HTTPNotFound(msg)
network['assigned_networks'] = network_id_list
# by cluster id and network_name search network table
registry.update_phyname_of_network(req.context, assigned_networks)
else:
msg = "cluster params is none"
raise HTTPNotFound(msg)
if host_meta.has_key('os_status'):
if host_meta['os_status'] not in ['init', 'installing', 'active', 'failed', 'none']:
msg = "os_status is not valid."
raise HTTPNotFound(msg)
if host_meta['os_status'] == 'init':
if orig_host_meta.get('interfaces', None):
macs = [interface['mac'] for interface in orig_host_meta['interfaces']]
for mac in macs:
delete_host_discovery_info = 'pxe_os_install_clean ' + mac
subprocess.call(delete_host_discovery_info,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if (not host_meta.has_key('role') and
orig_host_meta.has_key('status') and
orig_host_meta['status'] == 'with-role'):
host_meta['role'] = []
if ((host_meta.has_key('ipmi_addr') and host_meta['ipmi_addr'])
or orig_host_meta['ipmi_addr']):
if not host_meta.has_key('ipmi_user') and not orig_host_meta['ipmi_user']:
host_meta['ipmi_user'] = 'zteroot'
if not host_meta.has_key('ipmi_passwd') and not orig_host_meta['ipmi_passwd']:
host_meta['ipmi_passwd'] = 'superuser'
try:
host_meta = registry.update_host_metadata(req.context,
id,
host_meta)
except exception.Invalid as e:
msg = (_("Failed to update host metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find host to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update host: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('Host operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('host.update', host_meta)
return {'host_meta': host_meta}
class HostDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["host_meta"] = utils.get_host_meta(request)
return result
def add_host(self, request):
return self._deserialize(request)
def update_host(self, request):
return self._deserialize(request)
class HostSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_host(self, response, result):
host_meta = result['host_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(host=host_meta))
return response
def delete_host(self, response, result):
host_meta = result['host_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(host=host_meta))
return response
def get_host(self, response, result):
host_meta = result['host_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(host=host_meta))
return response
def create_resource():
"""Hosts resource factory method"""
deserializer = HostDeserializer()
serializer = HostSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
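# --- Wiring sketch (hypothetical; the actual route registration lives in the
# daisy.api.v1 router and may differ) ---
# resource = create_resource()
# mapper.connect('/nodes', controller=resource, action='add_host',
#                conditions={'method': ['POST']})
# mapper.connect('/nodes/{id}', controller=resource, action='delete_host',
#                conditions={'method': ['DELETE']})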
|
{
"content_hash": "581cd535ca1320001e401647f5139417",
"timestamp": "",
"source": "github",
"line_count": 736,
"max_line_length": 135,
"avg_line_length": 47.39945652173913,
"alnum_prop": 0.514131743392765,
"repo_name": "OpenDaisy/daisy-api",
"id": "95cf0b307873bd1f044d363a0d5e2f52c72a9797",
"size": "35522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daisy/api/v1/hosts.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1475450"
},
{
"name": "Shell",
"bytes": "7860"
}
],
"symlink_target": ""
}
|
"""
Helpers for pytest fixtures and data related testing.
"""
##########################################################################
## Imports and Module Variables
##########################################################################
from collections import namedtuple
## Used for wrapping a dataset into a single variable.
TestDataset = namedtuple('TestDataset', 'X,y')
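# Minimal usage sketch (illustrative; `model` is a stand-in estimator):
#
#     dataset = TestDataset(X=[[0, 1], [1, 0]], y=[0, 1])
#     model.fit(dataset.X, dataset.y)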
|
{
"content_hash": "e209fc71dd5f387f519837c41e3579b8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 29.692307692307693,
"alnum_prop": 0.46113989637305697,
"repo_name": "pdamodaran/yellowbrick",
"id": "8fc8f83c22a166bdc6f1720b10aa3e237ab48127",
"size": "698",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/fixtures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1082"
},
{
"name": "Python",
"bytes": "1218356"
},
{
"name": "TeX",
"bytes": "3743"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from mock import Mock
from sentry.api.bases.team import TeamPermission
from sentry.models import ApiKey
from sentry.testutils import TestCase
class TeamPermissionBase(TestCase):
def setUp(self):
self.org = self.create_organization(flags=0)
self.team = self.create_team(organization=self.org)
super(TeamPermissionBase, self).setUp()
def has_object_perm(self, method, obj, auth=None, user=None, is_superuser=None):
perm = TeamPermission()
request = Mock()
request.auth = auth
request.user = user
request.method = method
request.is_superuser = lambda: is_superuser if is_superuser is not None else user.is_superuser
return (
perm.has_permission(request, None) and
perm.has_object_permission(request, None, obj)
)
class TeamPermissionTest(TeamPermissionBase):
def test_get_regular_user(self):
user = self.create_user()
assert not self.has_object_perm('GET', self.team, user=user)
def test_get_superuser(self):
user = self.create_user(is_superuser=True)
assert self.has_object_perm('GET', self.team, user=user)
def test_get_without_team_membership(self):
user = self.create_user()
self.create_member(
user=user,
organization=self.org,
role='member',
teams=[],
)
assert not self.has_object_perm('GET', self.team, user=user)
def test_get_with_team_membership(self):
user = self.create_user()
self.create_member(
user=user,
organization=self.org,
role='member',
teams=[self.team],
)
assert self.has_object_perm('GET', self.team, user=user)
def test_get_api_key_with_org_access(self):
key = ApiKey.objects.create(
organization=self.org,
scopes=getattr(ApiKey.scopes, 'team:read'),
)
assert self.has_object_perm('GET', self.team, auth=key)
def test_get_api_key_without_org_access(self):
key = ApiKey.objects.create(
organization=self.create_organization(),
scopes=getattr(ApiKey.scopes, 'team:read'),
)
assert not self.has_object_perm('GET', self.team, auth=key)
def test_api_key_without_access(self):
key = ApiKey.objects.create(
organization=self.org,
scopes=0,
)
assert not self.has_object_perm('GET', self.org, auth=key)
def test_api_key_with_wrong_access(self):
key = ApiKey.objects.create(
organization=self.org,
scopes=getattr(ApiKey.scopes, 'project:read'),
)
assert not self.has_object_perm('GET', self.org, auth=key)
def test_api_key_with_wrong_access_for_method(self):
key = ApiKey.objects.create(
organization=self.org,
scopes=getattr(ApiKey.scopes, 'team:read'),
)
assert not self.has_object_perm('PUT', self.project, auth=key)
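# Note (illustrative): ApiKey.scopes here behaves as a named bit field, so
# getattr(ApiKey.scopes, 'team:read') resolves the scope name to its flag
# value, scopes=0 grants nothing, and a key carrying only 'project:read'
# fails the team-level checks above.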
|
{
"content_hash": "0b645850c6540aec8340943695ccbf4f",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 102,
"avg_line_length": 33.76923076923077,
"alnum_prop": 0.6127562642369021,
"repo_name": "fotinakis/sentry",
"id": "bb9f48528a48af2d00e9e64acbebfe7ac6d3c5ed",
"size": "3073",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/sentry/api/bases/test_team.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "222885"
},
{
"name": "HTML",
"bytes": "282398"
},
{
"name": "JavaScript",
"bytes": "927323"
},
{
"name": "Lua",
"bytes": "22367"
},
{
"name": "Makefile",
"bytes": "5812"
},
{
"name": "Python",
"bytes": "11654397"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
from collections import Counter
from typing import List, Mapping, Union, Optional
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from bartpy.runner import run_models
from bartpy.sklearnmodel import SklearnModel
ImportanceMap = Mapping[int, float]
ImportanceDistributionMap = Mapping[int, List[float]]
def feature_split_proportions(model: SklearnModel, columns: Optional[List[int]]=None) -> Mapping[int, float]:
split_variables = []
for sample in model.model_samples:
for tree in sample.trees:
for node in tree.nodes:
splitting_var = node.split.splitting_variable
split_variables.append(splitting_var)
counter = Counter(split_variables)
if columns is None:
columns = sorted(list([x for x in counter.keys() if x is not None]))
proportions = {}
for column in columns:
if column in counter.keys():
proportions[column] = counter[column] / len(split_variables)
else:
proportions[column] = 0.0
return proportions
def plot_feature_split_proportions(model: SklearnModel, ax=None):
if ax is None:
_, ax = plt.subplots(1, 1)
proportions = feature_split_proportions(model)
y_pos = np.arange(len(proportions))
name, count = list(proportions.keys()), list(proportions.values())
props = pd.DataFrame({"name": name, "counts": count}).sort_values("name", ascending=True)
    # Draw on the provided (or newly created) axes instead of the implicit
    # current pyplot figure, so a caller-supplied `ax` is actually used.
    ax.barh(y_pos, props.counts, align='center', alpha=0.5)
    ax.set_yticks(y_pos)
    ax.set_yticklabels(props.name)
    ax.set_xlabel('Proportion of all splits')
    ax.set_ylabel('Feature')
    ax.set_title('Proportion of Splits Made on Each Variable')
return ax
def null_feature_split_proportions_distribution(model: SklearnModel,
X: Union[pd.DataFrame, np.ndarray],
y: np.ndarray,
n_permutations: int=10) -> Mapping[int, List[float]]:
"""
Calculate a null distribution of proportion of splits on each variable in X
Works by randomly permuting y to remove any true dependence of y on X and calculating feature importance
Parameters
----------
model: SklearnModel
Model specification to work with
X: np.ndarray
Covariate matrix
y: np.ndarray
Target data
n_permutations: int
How many permutations to run
The higher the number of permutations, the more accurate the null distribution, but the longer it will take to run
Returns
-------
Mapping[int, List[float]]
A list of inclusion proportions for each variable in X
"""
inclusion_dict = {x: [] for x in range(X.shape[1])}
y_s = [np.random.permutation(y) for _ in range(n_permutations)]
X_s = [X for _ in y_s]
fit_models = run_models(model, X_s, y_s)
    for fit_model in fit_models:
        splits_run = feature_split_proportions(fit_model, list(range(X.shape[1])))
for key, value in splits_run.items():
inclusion_dict[key].append(value)
return inclusion_dict
def plot_null_feature_importance_distributions(null_distributions: Mapping[int, List[float]], ax=None) -> None:
if ax is None:
_, ax = plt.subplots(1, 1)
df = pd.DataFrame(null_distributions)
df = pd.DataFrame(df.unstack()).reset_index().drop("level_1", axis=1)
df.columns = ["variable", "p"]
sns.boxplot(x="variable", y="p", data=df, ax=ax)
ax.set_title("Null Feature Importance Distribution")
return ax
def local_thresholds(null_distributions: ImportanceDistributionMap, percentile: float) -> Mapping[int, float]:
"""
Calculate the required proportion of splits to be selected by variable
Creates a null distribution for each variable based on the % of splits including that variable in each of the permuted models
Each variable has its own threshold that is independent of the other variables
Note - this is significantly less stringent than the global threshold
Parameters
----------
null_distributions: ImportanceDistributionMap
A mapping from variable to distribution of split inclusion proportions under the null
percentile: float
The percentile of the null distribution to use as a cutoff.
The closer to 1.0, the more stringent the threshold
Returns
-------
Mapping[int, float]
A lookup from column to % inclusion threshold
"""
return {feature: np.percentile(null_distributions[feature], percentile) for feature in null_distributions}
def global_thresholds(null_distributions: ImportanceDistributionMap, percentile: float) -> Mapping[int, float]:
"""
Calculate the required proportion of splits to be selected by variable
Creates a distribution of the _highest_ inclusion percentage of any variable in each of the permuted models
Threshold is set as a percentile of this distribution
All variables have the same threshold
Note that this is significantly more stringent than the local threshold
Parameters
----------
null_distributions: ImportanceDistributionMap
A mapping from variable to distribution of split inclusion proportions under the null
percentile: float
        The percentile of the null distribution to use as a cutoff, on the 0-100
        scale expected by np.percentile. The closer to 100, the more stringent the threshold
Returns
-------
Mapping[int, float]
A lookup from column to % inclusion threshold
"""
    df = pd.DataFrame(null_distributions)
    # pandas has no iter_rows(); take the row-wise maximum directly to get the
    # highest inclusion proportion of any variable in each permuted model
    q_s = df.max(axis=1).values
threshold = np.percentile(q_s, percentile)
return {feature: threshold for feature in null_distributions}
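# Illustrative comparison (hypothetical numbers): local thresholds are computed
# per column, while the global threshold is shared and comes from the per-model maxima.
#
#   null = {0: [0.1, 0.2], 1: [0.3, 0.5]}
#   local_thresholds(null, 75)   # -> {0: 0.175, 1: 0.45}
#   global_thresholds(null, 75)  # per-model maxima are [0.3, 0.5] -> {0: 0.45, 1: 0.45}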
def kept_features(feature_proportions: Mapping[int, float], thresholds: Mapping[int, float]) -> List[int]:
"""
Extract the features to keep
Parameters
----------
feature_proportions: Mapping[int, float]
Lookup from variable to % of splits in the model that use that variable
thresholds: Mapping[int, float]
Lookup from variable to required % of splits in the model to be kept
Returns
-------
List[int]
Variable selected for inclusion in the final model
"""
return [x[0] for x in zip(sorted(feature_proportions.keys()), is_kept(feature_proportions, thresholds)) if x[1]]
def is_kept(feature_proportions: Mapping[int, float], thresholds: Mapping[int, float]) -> List[bool]:
"""
Determine whether each variable should be kept after selection
Parameters
----------
feature_proportions: Mapping[int, float]
Lookup from variable to % of splits in the model that use that variable
thresholds: Mapping[int, float]
Lookup from variable to required % of splits in the model to be kept
Returns
-------
List[bool]
An array of length equal to the width of the covariate matrix
True if the variable should be kept, False otherwise
"""
    return [feature_proportions[feature] > thresholds[feature] for feature in sorted(feature_proportions.keys())]
def partition_into_passed_and_failed_features(feature_proportions, thresholds):
kept = kept_features(feature_proportions, thresholds)
passed_features = {x[0]: x[1] for x in feature_proportions.items() if x[0] in kept}
failed_features = {x[0]: x[1] for x in feature_proportions.items() if x[0] not in kept}
return passed_features, failed_features
def plot_feature_proportions_against_thresholds(feature_proportions, thresholds, ax=None):
if ax is None:
_, ax = plt.subplots(1, 1)
passed_features, failed_features = partition_into_passed_and_failed_features(feature_proportions, thresholds)
ax.bar(thresholds.keys(), [x * 100 for x in thresholds.values()], width=0.01, color="black", alpha=0.5)
ax.scatter(passed_features.keys(), [x * 100 for x in passed_features.values()], c="g")
ax.scatter(failed_features.keys(), [x * 100 for x in failed_features.values()], c="r")
ax.set_title("Feature Importance Compared to Threshold")
ax.set_xlabel("Feature")
ax.set_ylabel("% Splits")
return ax
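if __name__ == "__main__":
    # Illustrative self-check (hypothetical numbers, not shipped with bartpy):
    # exercise the selection logic on hand-made proportions and thresholds.
    example_proportions = {0: 0.40, 1: 0.05, 2: 0.30}
    example_thresholds = {0: 0.20, 1: 0.20, 2: 0.35}
    print(is_kept(example_proportions, example_thresholds))        # [True, False, False]
    print(kept_features(example_proportions, example_thresholds))  # [0]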
|
{
"content_hash": "83c0d9ef5335339f508f52be03c47a3c",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 129,
"avg_line_length": 37.229729729729726,
"alnum_prop": 0.673805202661827,
"repo_name": "JakeColtman/bartpy",
"id": "b15150641ad446dea195c906961c56fc57b4086a",
"size": "8265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bartpy/diagnostics/features.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133623"
}
],
"symlink_target": ""
}
|
import os
import contextlib
import functools
import gc
import socket
import sys
import types
import warnings
_DEPR_MSG = (
"pyarrow.{} is deprecated as of {}, please use pyarrow.{} instead."
)
def implements(f):
def decorator(g):
g.__doc__ = f.__doc__
return g
return decorator
def _deprecate_api(old_name, new_name, api, next_version, type=FutureWarning):
msg = _DEPR_MSG.format(old_name, next_version, new_name)
def wrapper(*args, **kwargs):
warnings.warn(msg, type)
return api(*args, **kwargs)
return wrapper
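# Minimal usage sketch (names are illustrative, not real pyarrow APIs):
#
#   def new_func(x):
#       return x * 2
#
#   old_func = _deprecate_api("old_func", "new_func", new_func, "2.0.0")
#   old_func(3)  # emits a FutureWarning pointing at pyarrow.new_func, returns 6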
def _deprecate_class(old_name, new_class, next_version,
instancecheck=True):
"""
Raise warning if a deprecated class is used in an isinstance check.
"""
class _DeprecatedMeta(type):
def __instancecheck__(self, other):
warnings.warn(
_DEPR_MSG.format(old_name, next_version, new_class.__name__),
FutureWarning,
stacklevel=2
)
return isinstance(other, new_class)
return _DeprecatedMeta(old_name, (new_class,), {})
def _is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def _is_path_like(path):
return isinstance(path, str) or hasattr(path, '__fspath__')
def _stringify_path(path):
"""
Convert *path* to a string or unicode path if possible.
"""
if isinstance(path, str):
return os.path.expanduser(path)
# checking whether path implements the filesystem protocol
try:
return os.path.expanduser(path.__fspath__())
except AttributeError:
pass
raise TypeError("not a path-like object")
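# Sketch: plain strings and os.PathLike objects are both accepted, e.g.
#
#   import pathlib
#   _stringify_path("~/data.parquet")                # -> expanded string path
#   _stringify_path(pathlib.Path("~/data.parquet"))  # -> expanded string path
#   _stringify_path(42)                              # -> raises TypeError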
def product(seq):
"""
Return a product of sequence items.
"""
return functools.reduce(lambda a, b: a*b, seq, 1)
def get_contiguous_span(shape, strides, itemsize):
"""
Return a contiguous span of N-D array data.
Parameters
----------
shape : tuple
strides : tuple
itemsize : int
Specify array shape data
Returns
-------
start, end : int
The span end points.
"""
if not strides:
start = 0
end = itemsize * product(shape)
else:
start = 0
end = itemsize
for i, dim in enumerate(shape):
if dim == 0:
start = end = 0
break
stride = strides[i]
if stride > 0:
end += stride * (dim - 1)
elif stride < 0:
start += stride * (dim - 1)
if end - start != itemsize * product(shape):
raise ValueError('array data is non-contiguous')
return start, end
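# Worked example: a C-contiguous (2, 3) float64 array has strides (24, 8) and
# itemsize 8, so the span covers the whole 48-byte buffer:
#
#   get_contiguous_span((2, 3), (24, 8), 8)   # -> (0, 48)
#
# an every-other-column slice of that array has strides (24, 16), whose span
# no longer matches itemsize * product(shape), so a ValueError is raised:
#
#   get_contiguous_span((2, 2), (24, 16), 8)  # -> ValueError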
def find_free_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with contextlib.closing(sock) as sock:
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return sock.getsockname()[1]
def guid():
from uuid import uuid4
return uuid4().hex
def _break_traceback_cycle_from_frame(frame):
# Clear local variables in all inner frames, so as to break the
# reference cycle.
this_frame = sys._getframe(0)
refs = gc.get_referrers(frame)
while refs:
for frame in refs:
if frame is not this_frame and isinstance(frame, types.FrameType):
break
else:
# No frame found in referrers (finished?)
break
refs = None
# Clear the frame locals, to try and break the cycle (it is
# somewhere along the chain of execution frames).
frame.clear()
# To visit the inner frame, we need to find it among the
# referrers of this frame (while `frame.f_back` would let
# us visit the outer frame).
refs = gc.get_referrers(frame)
refs = frame = this_frame = None
|
{
"content_hash": "36fe26addee437def27ad023e9b0316e",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 78,
"avg_line_length": 25.405228758169933,
"alnum_prop": 0.5847697453048624,
"repo_name": "apache/arrow",
"id": "0e0f3e72650834fa16139ee519c85b19feb897dc",
"size": "4703",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/pyarrow/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "3709"
},
{
"name": "Batchfile",
"bytes": "30689"
},
{
"name": "C",
"bytes": "1400442"
},
{
"name": "C#",
"bytes": "1029129"
},
{
"name": "C++",
"bytes": "24661612"
},
{
"name": "CMake",
"bytes": "709915"
},
{
"name": "Cython",
"bytes": "1554440"
},
{
"name": "Dockerfile",
"bytes": "147322"
},
{
"name": "Emacs Lisp",
"bytes": "1064"
},
{
"name": "FreeMarker",
"bytes": "2312"
},
{
"name": "Go",
"bytes": "4586449"
},
{
"name": "HTML",
"bytes": "3430"
},
{
"name": "Java",
"bytes": "7045674"
},
{
"name": "JavaScript",
"bytes": "127157"
},
{
"name": "Jinja",
"bytes": "19948"
},
{
"name": "Lua",
"bytes": "8771"
},
{
"name": "MATLAB",
"bytes": "40399"
},
{
"name": "Makefile",
"bytes": "32873"
},
{
"name": "Meson",
"bytes": "69508"
},
{
"name": "Objective-C++",
"bytes": "11472"
},
{
"name": "Perl",
"bytes": "3803"
},
{
"name": "Python",
"bytes": "3055602"
},
{
"name": "R",
"bytes": "1561613"
},
{
"name": "Ruby",
"bytes": "1615226"
},
{
"name": "Shell",
"bytes": "389942"
},
{
"name": "Thrift",
"bytes": "34246"
},
{
"name": "TypeScript",
"bytes": "1075563"
},
{
"name": "Vala",
"bytes": "24798"
}
],
"symlink_target": ""
}
|
from recommender.database.daos import PositionDao, RatingDao
class UserWithSkills(object):
def __init__(self, user):
self.user = user
self.skills = None
def getSkills(self):
if self.skills is None:
raise Exception("You need to call loadSkills or loadExtendedSkills first!")
return self.skills
def setSkills(self, skills):
self.skills = skills
def getID(self):
return self.user.id
def __str__(self):
return 'UserWithSkills: %s %s, %s' % (self.user.firstName, self.user.lastName, str(self.skills))
def getUserDataForClassifier(self, session, match, fallbackAvgRating):
"""
Gathers user data from the DB for better machine learning results.
        :param session: database session used to construct the DAOs
        :param match: precomputed skill match score for this user
        :param fallbackAvgRating: average rating to fall back on when the user has no ratings yet
        :return: feature vector [match, activeProjects, avgRating]
"""
positionDao = PositionDao(session)
ratingDao = RatingDao(session)
activeProjects = len(positionDao.getPositionsOfUser(self.user.id))
userRatings = [rating.rating for rating in ratingDao.getRatingsForUser(self.user.id)]
if len(userRatings) > 0:
avgRating = sum(userRatings) / len(userRatings)
else:
avgRating = fallbackAvgRating
#TODO hours already occupied in other projects
return [match, activeProjects, avgRating]
|
{
"content_hash": "049d1b51eb3e8f049e84c3c87bbb3e65",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 104,
"avg_line_length": 30.466666666666665,
"alnum_prop": 0.6404084609773888,
"repo_name": "ScJa/projectr",
"id": "7e20d1001eba2594f03b7a965a4616ce685979c6",
"size": "1371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recommender/recommender/core/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4223"
},
{
"name": "HTML",
"bytes": "161064"
},
{
"name": "JavaScript",
"bytes": "196604"
},
{
"name": "Python",
"bytes": "58619"
}
],
"symlink_target": ""
}
|
"""Tests for external_optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# pylint: disable=g-import-not-at-top,unused-import
try:
import __builtin__ as builtins
except ImportError:
import builtins
class MockOptimizerInterface(tf.contrib.opt.ExternalOptimizerInterface):
NUM_STEP_CALLS = 5
NUM_LOSS_CALLS = 2
def _minimize(self, initial_val, loss_grad_func, step_callback,
optimizer_kwargs, **unused_kwargs):
"""Minimize (x - x0)**2 / 2 with respect to x."""
for _ in range(self.NUM_LOSS_CALLS):
loss_grad_func(initial_val)
for _ in range(self.NUM_STEP_CALLS):
step_callback(initial_val)
_, grad = loss_grad_func(initial_val)
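    # For this quadratic loss the gradient at x is (x - x0), so the single
    # full-length step below lands exactly on the minimizer x0.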
return initial_val - grad
class TestCase(tf.test.TestCase):
def assertAllClose(self, array1, array2):
array1 = np.asarray(array1)
array2 = np.asarray(array2)
if not array1.shape:
array1 = np.array([array1])
if not array2.shape:
array2 = np.array([array2])
super(TestCase, self).assertAllClose(array1, array2, rtol=1e-5, atol=1e-5)
class ExternalOptimizerInterfaceTest(TestCase):
def test_optimize(self):
    # tf.Variable's second positional argument is `trainable`, so the names
    # must be passed as keyword arguments
    scalar = tf.Variable(tf.random_normal([]), name='scalar')
    vector = tf.Variable(tf.random_normal([2]), name='vector')
    matrix = tf.Variable(tf.random_normal([2, 3]), name='matrix')
minimum_location = tf.constant(np.arange(9), dtype=tf.float32)
loss = tf.reduce_sum(tf.square(vector - minimum_location[:2])) / 2.
loss += tf.reduce_sum(tf.square(scalar - minimum_location[2])) / 2.
loss += tf.reduce_sum(tf.square(
matrix - tf.reshape(minimum_location[3:], [2, 3]))) / 2.
optimizer = MockOptimizerInterface(loss)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
optimizer.minimize(sess)
self.assertAllClose(np.arange(2), sess.run(vector))
self.assertAllClose(np.arange(1) + 2, sess.run(scalar))
self.assertAllClose(np.arange(6).reshape(2, 3) + 3, sess.run(matrix))
def test_callbacks(self):
vector_val = np.array([7., -2.], dtype=np.float32)
    vector = tf.Variable(vector_val, name='vector')
minimum_location_val = np.arange(2)
minimum_location = tf.constant(minimum_location_val, dtype=tf.float32)
loss = tf.reduce_sum(tf.square(vector - minimum_location)) / 2.
loss_val = ((vector_val - minimum_location_val)**2).sum() / 2.
optimizer = MockOptimizerInterface(loss)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
initial_vector_val = sess.run(vector)
extra_fetches = [loss]
step_callback = tf.test.mock.Mock()
loss_callback = tf.test.mock.Mock()
optimizer.minimize(
sess, fetches=extra_fetches, loss_callback=loss_callback,
step_callback=step_callback)
call = tf.test.mock.call(loss_val)
loss_calls = [call] * MockOptimizerInterface.NUM_LOSS_CALLS
loss_callback.assert_has_calls(loss_calls)
args, _ = step_callback.call_args
self.assertAllClose(initial_vector_val, args[0])
class ScipyOptimizerInterfaceTest(TestCase):
def test_unconstrained(self):
def objective(x):
"""Rosenbrock function. (Carl Edward Rasmussen, 2001-07-21).
f(x) = sum_{i=1:D-1} 100*(x(i+1) - x(i)^2)^2 + (1-x(i))^2
Args:
x: a Variable
Returns:
f: a tensor (objective value)
"""
d = tf.size(x)
s = tf.add(100 * tf.square(
tf.sub(
tf.strided_slice(x, [1], [d]),
tf.square(tf.strided_slice(x, [0], [d - 1])))),
tf.square(tf.sub(1.0, tf.strided_slice(x, [0], [d - 1]))))
return tf.reduce_sum(s)
dimension = 5
x = tf.Variable(tf.zeros(dimension))
optimizer = tf.contrib.opt.ScipyOptimizerInterface(objective(x))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
optimizer.minimize(sess)
self.assertAllClose(np.ones(dimension), sess.run(x))
def test_nonlinear_programming(self):
vector_initial_value = [7., 7.]
    vector = tf.Variable(vector_initial_value, name='vector')
# Make norm as small as possible.
loss = tf.reduce_sum(tf.square(vector))
# Ensure y = 1.
equalities = [vector[1] - 1.]
# Ensure x >= 1. Thus optimum should be at (1, 1).
inequalities = [vector[0] - 1.]
optimizer = tf.contrib.opt.ScipyOptimizerInterface(
loss, equalities=equalities, inequalities=inequalities,
method='SLSQP')
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
optimizer.minimize(sess)
self.assertAllClose(np.ones(2), sess.run(vector))
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "3086eb079e8c94b4e8815d335312d4f7",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 78,
"avg_line_length": 30.1875,
"alnum_prop": 0.6443064182194617,
"repo_name": "ppries/tensorflow",
"id": "9dd64e5b32514c00be917cad2fccd2d6b2974666",
"size": "5520",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/opt/python/training/external_optimizer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "118101"
},
{
"name": "C++",
"bytes": "14610065"
},
{
"name": "CMake",
"bytes": "110931"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "96398"
},
{
"name": "HTML",
"bytes": "533840"
},
{
"name": "Java",
"bytes": "179112"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833491"
},
{
"name": "Makefile",
"bytes": "23553"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "151841"
},
{
"name": "Python",
"bytes": "14778281"
},
{
"name": "Shell",
"bytes": "310226"
},
{
"name": "TypeScript",
"bytes": "757225"
}
],
"symlink_target": ""
}
|
from core.himesis import Himesis
import uuid
class HConnECU2VirtualDevice2(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule ConnECU2VirtualDevice2.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HConnECU2VirtualDevice2, self).__init__(name='HConnECU2VirtualDevice2', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """ConnECU2VirtualDevice2"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'ConnECU2VirtualDevice2')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["attr1"] = """ConnECU2VirtualDevice2"""
# match class PhysicalNode(6.0.m.0PhysicalNode) node
self.add_node()
self.vs[3]["mm__"] = """PhysicalNode"""
self.vs[3]["attr1"] = """+"""
# match class Partition(6.0.m.1Partition) node
self.add_node()
self.vs[4]["mm__"] = """Partition"""
self.vs[4]["attr1"] = """+"""
# apply class SwcToEcuMapping(6.0.a.0SwcToEcuMapping) node
self.add_node()
self.vs[5]["mm__"] = """SwcToEcuMapping"""
self.vs[5]["attr1"] = """1"""
# apply class EcuInstance(6.0.a.1EcuInstance) node
self.add_node()
self.vs[6]["mm__"] = """EcuInstance"""
self.vs[6]["attr1"] = """1"""
# match association PhysicalNode--partition-->Partition node
self.add_node()
self.vs[7]["attr1"] = """partition"""
self.vs[7]["mm__"] = """directLink_S"""
# apply association SwcToEcuMapping--ecuInstance-->EcuInstance node
self.add_node()
self.vs[8]["attr1"] = """ecuInstance"""
self.vs[8]["mm__"] = """directLink_T"""
# backward association EcuInstance-->PhysicalNodenode
self.add_node()
self.vs[9]["mm__"] = """backward_link"""
# backward association SwcToEcuMapping-->Partitionnode
self.add_node()
self.vs[10]["mm__"] = """backward_link"""
# Add the edges
        self.add_edges([
            (0, 3),   # match model -> match_class PhysicalNode(6.0.m.0PhysicalNode)
            (0, 4),   # match model -> match_class Partition(6.0.m.1Partition)
            (1, 5),   # apply model -> apply_class SwcToEcuMapping(6.0.a.0SwcToEcuMapping)
            (1, 6),   # apply model -> apply_class EcuInstance(6.0.a.1EcuInstance)
            (3, 7),   # match_class PhysicalNode(6.0.m.0PhysicalNode) -> association partition
            (7, 4),   # association partition -> match_class Partition(6.0.m.1Partition)
            (5, 8),   # apply_class SwcToEcuMapping(6.0.a.0SwcToEcuMapping) -> association ecuInstance
            (8, 6),   # association ecuInstance -> apply_class EcuInstance(6.0.a.1EcuInstance)
            (6, 9),   # apply_class EcuInstance(6.0.a.1EcuInstance) -> backward_link
            (9, 3),   # backward_link -> match_class PhysicalNode(6.0.m.0PhysicalNode)
            (5, 10),  # apply_class SwcToEcuMapping(6.0.a.0SwcToEcuMapping) -> backward_link
            (10, 4),  # backward_link -> match_class Partition(6.0.m.1Partition)
            (0, 2),   # match model -> paired_with
            (2, 1)    # paired_with -> apply model
        ])
self["equations"] = []
|
{
"content_hash": "d84eb7f612d02548a4654011023f4e8a",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 102,
"avg_line_length": 36.15909090909091,
"alnum_prop": 0.6621621621621622,
"repo_name": "levilucio/SyVOLT",
"id": "660ac02665404209b78a5c836e6e2d48f3ce1971",
"size": "3182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GM2AUTOSAR_MM/transformation_from_MPS/HConnECU2VirtualDevice2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Simple charts from HBase small tables, based on the HBase REST server, Pygal, Flask and Requests
'''
from flask import Flask, make_response
from flask import render_template
from werkzeug.routing import BaseConverter, ValidationError
import pygal
from pygal.style import DarkSolarizedStyle
import requests
from base64 import b64decode
from operator import itemgetter
app = Flask(__name__)
'''
Dictionary from chart type as string to a format string for the URL to get the chart as svg.
Each function registers itself in this dictionary
'''
_supported_chart_types = {}
'''
Example URLs supported by HBase
curl -H "Accept: application/json" http://localhost:9998/test_hbase_py_client/john
curl -H "Accept: application/json" http://localhost:9998/test_hbase_py_client/john/visits
curl -H "Accept: application/json" http://localhost:9998/test_hbase_py_client/*
curl -H "Accept: application/json" http://localhost:9998/test_hbase_py_client/*/visits
Wildcards are supported when specifying the row key, even when a family is later specified
[cloudera@localhost local]$ curl -H "Accept: application/json" http://localhost:9998/test_hbase_py_client/*/visits
{"Row":[{"key":"am9obg==","Cell":[{"column":"dmlzaXRzOmFtYXpvbi5jb20=","timestamp":1393791171026,"$":"NQ=="},{"column":"dmlzaXRzOmdvb2dsZS5lcw==","timestamp":1393791171063,"$":"Mg=="}]},{"key":"bWFyeQ==","Cell":[{"column":"dmlzaXRzOmFtYXpvbi5jb20=","timestamp":1393791171079,"$":"NA=="},{"column":"dmlzaXRzOmZhY2Vib29rLmNvbQ==","timestamp":1393791171098,"$":"Mg=="}]}]}[cloudera@localhost local]$
[cloudera@localhost local]$ curl -H "Accept: application/json" http://localhost:9998/test_hbase_py_client/*
{"Row":[{"key":"am9obg==","Cell":[{"column":"aW5mbzphZ2U=","timestamp":1393791170961,"$":"NDI="},{"column":"dmlzaXRzOmFtYXpvbi5jb20=","timestamp":1393791171026,"$":"NQ=="},{"column":"dmlzaXRzOmdvb2dsZS5lcw==","timestamp":1393791171063,"$":"Mg=="}]},{"key":"bWFyeQ==","Cell":[{"column":"aW5mbzphZ2U=","timestamp":1393791170995,"$":"MjY="},{"column":"dmlzaXRzOmFtYXpvbi5jb20=","timestamp":1393791171079,"$":"NA=="},{"column":"dmlzaXRzOmZhY2Vib29rLmNvbQ==","timestamp":1393791171098,"$":"Mg=="}]}]}[cloudera@localhost local]$
'''
_get_hBase_rows_format = 'http://{server}/{table}/{row_keys}/{family}'
_get_hBase_rows_headers = {'accept': 'application/json'}
def get_hBase_rows(server, table, row_keys, family=''):
'''
    Queries HBase for the last version of the cells in a table and row key, and optionally for a
particular column family.
Values are decoded from base64
:param server: e.g. 'localhost:9998'
    :param table: name of the HBase table
    :param row_keys: key of the HBase row to obtain. Suffix globbing is supported as described
    in http://wiki.apache.org/hadoop/Hbase/Stargate
    :param family: if this value is present only the cells in that family are obtained
    :return: None if there was some error with the request, otherwise
returns a list of dictionaries like
{'key': 'john', 'row': [{'qual': 'age', 'value': '42', 'family': 'info', 'timestamp': 1393791170961L}, {'qual': 'amazon.com', 'value': '5', 'family': 'visits', 'timestamp': 1393791171026L}, {'qual': 'google.es', 'value': '2', 'family': 'visits', 'timestamp': 1393791171063L}]}
for each of the keys specified, where decoding from base64 was already performed
Examples:
>>> get_hBase_rows("localhost:9998", "test_hbase_py_client", "john")
[{'key': 'john', 'row': [{'qual': 'age', 'value': '42', 'family': 'info', 'timestamp': 1393791170961L}, {'qual': 'amazon.com', 'value': '5', 'family': 'visits', 'timestamp': 1393791171026L}, {'qual': 'google.es', 'value': '2', 'family': 'visits', 'timestamp': 1393791171063L}]}]
>>> get_hBase_rows("localhost:9998", "test_hbase_py_client", "john", "visits")
[{'key': 'john', 'row': [{'qual': 'amazon.com', 'value': '5', 'family': 'visits', 'timestamp': 1393791171026L}, {'qual': 'google.es', 'value': '2', 'family': 'visits', 'timestamp': 1393791171063L}]}]
'''
    # TODO: extra argument for the timeout of the request
    try:
        hbase_request = requests.get(_get_hBase_rows_format.format(server=server, table=table, row_keys=row_keys, family=family),
                                     headers=_get_hBase_rows_headers)
    except requests.RequestException:
        return None
if hbase_request.status_code == 404:
return []
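    # The single-element tuples in the trailing "for" clauses act as local
    # bindings: `column` is the decoded 'family:qualifier' string and `sep_idx`
    # the position of its ':' separator.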
return [{'key' : b64decode(row['key']),
'row' : [{'family' : column[:sep_idx], 'qual' : column[sep_idx + 1:],
'value' : b64decode(cell['$']), 'timestamp' : long(cell['timestamp']) }
for cell in row['Cell']
for column in (b64decode(cell['column']), )
for sep_idx in (column.find(':'), ) ]}
for row in hbase_request.json()['Row']]
_supported_chart_types['bar'] = '/hbase/svg/bar/{server}/{table}/{title}/{family}/{row_keys}'
@app.route('/hbase/svg/bar/<server>/<table>/<title>/<family>/<path:row_keys>')
def svg_barchart_for_hbase_rows(server, table, title, family, row_keys):
'''
    A chart will be built from the values of the cells in that column family
* The x-axis labels are the column qualifiers found in all cells for the keys
* For each key there is a bar group with a bar in each point of the x-axis. None
is used to fill missing values for a qual, with the usual meaning in pygal (no
value will be shown for that bar group at the point)
NOTE: assuming all the values are of type float
NOTE: taking last version of each cell
:param row_keys key of the HBase row to obtain, as a string of row keys separated
by '/', suffix globbing as described in http://wiki.apache.org/hadoop/Hbase/Stargate
is supported for each component of the string as obtained by row_keys.split('/')
'''
# Example URL: http://localhost:9999/hbase/svg/bar/localhost:9998/test_hbase_py_client/Sites%20Visited/visits/john/mary
# get values from HBase: don't forget conversion to number
# {row : { qual : value) } }
rows = { row['key'] : { cell['qual'] : float(cell['value']) for cell in row['row'] }
for row_key in row_keys.split('/')
for row in get_hBase_rows(server, table, row_key, family)
}
# get sorted values for x axis
x_labels = sorted({ qual for qual_vals in rows.values() for qual in qual_vals.keys() })
# build an SVG chart
# TODO: consider specifying styles (e.g. chart = pygal.Bar(style=DarkSolarizedStyle))
chart = pygal.Bar()
chart.title = title
chart.x_labels = x_labels
# add the values for each key
for key, qual_vals in rows.iteritems():
# use get to fill spaces with None
chart.add(key, [ qual_vals.get(label) for label in x_labels ])
# return as a Flask response
return chart.render_response()
class ChartsSpecsConverter(BaseConverter):
'''
Custom URL converter for the chart specifications
'''
def __init__(self, url_map):
super(ChartsSpecsConverter, self).__init__(url_map)
self.regex = '(?:.*(?=/keys))'
def to_python(self, value):
'''
For tuples (chart_type, chart_title, family)
e.g. value is 'bar/Sites%20Visited%20Bar/visits/pie/Sites%20Visited%20Pie/visits'
Apply validation rules here, e.g.
- valid chart types: see variable chart_types
- each chart spec must be a 3 elements
- same chart is not specified twice
Return a list of dictionaries corresponding to spec tuples,
with keys 'chart_type', 'title', 'family'
'''
split_value = value.split('/')
n_splits = len(split_value)
tuple_size = 3
if (n_splits % tuple_size) != 0:
raise ValidationError("Chart specs must be 3 elements tuples of the shape (chart_type, chart_title, family)")
specs = [ split_value[spec_idx * tuple_size : (spec_idx + 1) * tuple_size ]
for spec_idx in xrange(0, n_splits / tuple_size) ]
        titles = set()
        for spec in specs:
            chart_type, title = spec[0], spec[1]
            if chart_type not in _supported_chart_types:
                raise ValidationError("Unknown chart type {chart_type} for chart specification {chart_spec}".format(chart_type=chart_type, chart_spec=spec))
            if title in titles:
                raise ValidationError("Title {title} appears twice in chart specification {chart_spec}".format(title=title, chart_spec=spec))
            # record the title so the duplicate check above can actually fire
            titles.add(title)
        return [{'chart_type' : spec[0], 'title' : spec[1], 'family' : spec[2]} for spec in specs ]
    def to_url(self, specs):
        # '/'.join never leaves a trailing '/', so no trimming is needed
        return '/'.join('/'.join(spec) for spec in specs)
# Register the converter
app.url_map.converters['charts_specs'] = ChartsSpecsConverter
_charts_table_template='charts_table.html'
@app.route('/hbase/charts/<server>/<table>/width/<int:table_width>/cols/<int:num_cols>/refresh/<int:refresh>/<charts_specs:charts>/keys/<path:row_keys>')
def charts_table(server, table, table_width, num_cols, refresh, charts, row_keys):
'''
By default jinja2 will look for templates at the templates folder
in the root of the application.
    By using the template we get autorefresh using a <meta> header
TODO: configurable title for the whole chart
TODO: consider other routing '/hbase/charts2/<server>/<table>/cols/<int:num_cols>/refresh/<int:refresh>/<charts_spec_2:charts>'
    with charts_spec_2 for tuples (chart_type, chart_title, family, row_key) with different rows for the charts and grouping the
tuples by chart_title. Note that implies richer checks of the URL, as for example all the tuples with the same chart title
should have the same type => that suggests that maybe a better URL schema would first declare types for the chart titles
and then entries as pairs (family, row_key)
Example URLs:
http://localhost:9999/hbase/charts/localhost:9998/test_hbase_py_client/width/1500/cols/2/refresh/500/bar/Sites%20Visited/visits/bar/Info/info/keys/*
http://localhost:9999/hbase/charts/localhost:9998/test_hbase_py_client/width/1500/cols/2/refresh/500/bar/Sites%20Visited/visits/bar/Info/info/keys/mary/john
http://localhost:9999/hbase/charts/localhost:9998/test_hbase_py_client/width/850/cols/1/refresh/5/bar/Sites%20Visited/visits/bar/Info/info/keys/*
'''
spec_rows = [charts[i*num_cols : (i+1)*num_cols] for i in xrange(0, len(charts)/num_cols +1)]
    # drop last row in case it's empty (when len(charts) % num_cols == 0)
spec_rows = spec_rows if spec_rows[-1] != [] else spec_rows[:-1]
def update_chart_dict(spec):
spec.update({'server' : server, 'table' : table, 'row_keys' : row_keys})
return spec
chart_src_rows = [ [_supported_chart_types[spec['chart_type']].format(**update_chart_dict(spec)) for spec in row] for row in spec_rows ]
return render_template(_charts_table_template, table_width=table_width, refresh_rate=refresh,
title="HBase Chart", chart_src_rows=chart_src_rows)
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        print 'Usage: <port>'
        sys.exit(1)
    port = int(sys.argv[1])
# FIXME delete
print 'Sample URLs:'
print 'http://localhost:9999/hbase/svg/bar/localhost:9998/test_hbase_py_client/Sites%20Visited/visits/john/mary'
print 'http://localhost:9999/hbase/svg/bar/localhost:9998/test_hbase_py_client/Info/info/*'
print 'http://localhost:9999/hbase/charts/localhost:9998/test_hbase_py_client/width/1500/cols/2/refresh/500/bar/Sites%20Visited/visits/bar/Info/info/keys/mary/john'
print 'http://localhost:9999/hbase/charts/localhost:9998/test_hbase_py_client/width/1500/cols/2/refresh/500/bar/Sites%20Visited/visits/bar/Info/info/keys/*'
print 'http://localhost:9999/hbase/charts/localhost:9998/test_hbase_py_client/width/850/cols/1/refresh/5/bar/Sites%20Visited/visits/bar/Info/info/keys/*'
app.run(debug=True, port=port)
|
{
"content_hash": "259594f821b0719f5598efc385aa788f",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 527,
"avg_line_length": 57.34272300469483,
"alnum_prop": 0.6602259701981333,
"repo_name": "juanrh/data42",
"id": "fd2c302a537ee5894445520986a0e817e1aca425",
"size": "12214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hbase_rest_charts/python/hbase_rest_charts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2039"
},
{
"name": "Haskell",
"bytes": "2797"
},
{
"name": "Java",
"bytes": "50740"
},
{
"name": "Python",
"bytes": "83568"
},
{
"name": "Scala",
"bytes": "18030"
},
{
"name": "Shell",
"bytes": "7295"
}
],
"symlink_target": ""
}
|
__name__ = "tlsh_bh_tool"
__version__ = 1.0
__author__ = "Jayson Pryde"
import os, sys, tlsh, argparse, threading, pefile, requests
import logging, logging.config, hashlib, ConfigParser, simplejson
from extended_file_properties import extended_file_properties as efp
from os.path import getsize
class TlshStruct:
files = []
threads = []
flag = True
out = None
lock = None
logger = None
outname = None
counter = int(0)
thread_count = int(0)
restrict = None
query_url = None
file_basic_details = {}
file_prop_details = {}
file_cert_details = {}
def main(options):
if _init(options):
if os.path.isfile(options.file): _processFile1(options.file)
elif os.path.isdir(options.file):
_enumerateFiles(options.file)
_initOut()
_initScanningThreads()
_startScanningThreads()
_stopScanningThreads()
_deinitOut()
def _enumerateFiles(folder_path):
for root, dirs, files in os.walk(folder_path):
for name in files: TlshStruct.files.append(os.path.join(root, name))
def _init(options):
ret = True
try:
TlshStruct.thread_count = int(options.thread_count)
logging.config.fileConfig('tlsh_bh_tool.conf')
TlshStruct.logger = logging.getLogger('tlshbh')
cfg = ConfigParser.ConfigParser()
cfg.readfp(open("./tlsh_bh_tool.cfg"))
TlshStruct.file_basic_details["apikey"] = cfg.get('Credentials', 'apikey')
TlshStruct.file_basic_details["user"] = cfg.get('Credentials', 'user')
TlshStruct.query_url = cfg.get('Webservice', 'query_url')
TlshStruct.restrict = int(options.restrict)
TlshStruct.outname = options.out
except Exception, ex:
print "ERROR: Problem during initialization : %s" % ex
ret = False
finally:
return ret
def _initOut():
try:
TlshStruct.out = open(TlshStruct.outname, "w")
except Exception, ex:
print "ERROR: Problem initializing output : %s" % ex
def _deinitOut():
if TlshStruct.out is not None: TlshStruct.out.close()
def _initScanningThreads():
try:
TlshStruct.lock = threading.Lock()
for i in range(0, TlshStruct.thread_count): TlshStruct.threads.append(threading.Thread(target=_processFile2))
except Exception, ex:
print "ERROR: Problem in initializing scanning threads : %s" % ex
def _startScanningThreads():
for thr in TlshStruct.threads: thr.start()
def _stopScanningThreads():
for thr in TlshStruct.threads:
if thr.isAlive(): thr.join()
def _getSha256(filename):
h = None
sha256 = "NULL"
try:
h = hashlib.sha256()
with open(filename, "rb") as f:
while True:
block = f.read(2**12)
if not block: break
h.update(block)
sha256 = str(h.hexdigest())
except Exception, ex:
TlshStruct.logger.error("Problem in getting SHA1 of %s : %s" % (filename, ex))
sha256 = "ERROR"
finally:
return sha256
def _sendQuery():
result = {}
params = dict(TlshStruct.file_basic_details.items() + TlshStruct.file_prop_details.items() + TlshStruct.file_cert_details.items())
try:
response = requests.get(TlshStruct.query_url, params=params, verify=False)
if response.status_code == 200: result = simplejson.loads(response.content)
except Exception, ex:
TlshStruct.logger.error("Problem in sending query : %s" % ex)
finally:
return result
def _resetFileDetails():
TlshStruct.file_basic_details["tlsh"] = ""
TlshStruct.file_basic_details["sha256"] = ""
TlshStruct.file_prop_details = {}
TlshStruct.file_cert_details = {}
def _recordResults(result):
if result is None: return
if result["status"] == "ok":
if len(result["matches"]) != 0:
if TlshStruct.out is None:
try:
f = open(TlshStruct.outname, "w")
for r in result["matches"]:
f.write("%s,%s,%s,%s,%s\n" % (TlshStruct.file_basic_details["sha256"], TlshStruct.file_basic_details["tlsh"],
r["id"], r["tag"], r["distance_score"]))
f.close()
except Exception, ex:
TlshStruct.logger.error("Cannot write file %s : %s" % (TlshStruct.outname, ex))
else:
for r in result["matches"]:
TlshStruct.out.write("%s,%s,%s,%s,%s\n" % (TlshStruct.file_basic_details["sha256"], TlshStruct.file_basic_details["tlsh"],
r["id"], r["tag"], r["distance_score"]))
def _processFile1(filename):
print "Processing %s..." % filename
_resetFileDetails()
    if getsize(filename) <= 512: TlshStruct.logger.error("File %s too small to compute tlsh value" % filename)
else:
result = None
try:
TlshStruct.file_basic_details["filename"] = filename
TlshStruct.file_basic_details["tlsh"] = tlsh.hash(open(filename, "rb").read())
TlshStruct.file_basic_details["sha256"] = _getSha256(filename)
if not TlshStruct.restrict:
prop_details = efp.getBasicFileProperties(filename)
cert_details = efp.getCertificateDetails(filename)
TlshStruct.file_prop_details = prop_details if prop_details is not None else {}
TlshStruct.file_cert_details = cert_details if cert_details is not None else {}
result = _sendQuery()
        except Exception, ex:
            print "ERROR: Problem in getting tlsh value of %s : %s" % (filename, ex)
finally:
_recordResults(result)
def _processFile2():
while TlshStruct.flag:
TlshStruct.lock.acquire()
try:
filename = TlshStruct.files.pop()
_processFile1(filename)
except Exception, ex:
if TlshStruct.counter == TlshStruct.thread_count:
TlshStruct.flag = False
TlshStruct.counter = 0
else: TlshStruct.counter+=1
finally:
TlshStruct.lock.release()
def _showBanner():
if os.name == "nt": os.system("cls")
elif os.name == "posix": os.system("clear")
print "********************************"
print "* TLSH BlackHat Tool v1.0 *"
print "* Demo Version *"
print "********************************"
if __name__ == "tlsh_bh_tool":
_showBanner()
parser = argparse.ArgumentParser()
parser.add_argument("file", help="directory containing files | file")
parser.add_argument("-out", default="matches.csv", help="CSV file containing query results. Default is matches.csv")
parser.add_argument("-restrict", dest="restrict", default=0, help="0 == send all file properties | 1 == send only basic information (i.e. sha256 and tlsh).")
parser.add_argument("-tc", dest="thread_count", default=3, help="scanning thread count. Default is 3")
options = parser.parse_args()
main(options)
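# Example invocations (paths are illustrative):
#   python tlsh_bh_tool.py ./samples -out matches.csv -tc 5
#   python tlsh_bh_tool.py sample.exe -restrict 1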
|
{
"content_hash": "60db0deebcbdb0e556c8c598d834e257",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 158,
"avg_line_length": 43.386243386243386,
"alnum_prop": 0.5179268292682927,
"repo_name": "pombredanne/tlsh",
"id": "0ee9e72faa06b5456685d28f7d7cf26361b583c5",
"size": "8200",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tlsh_bh_tool/tlsh_bh_tool.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1110"
},
{
"name": "C",
"bytes": "3969"
},
{
"name": "C++",
"bytes": "314517"
},
{
"name": "CMake",
"bytes": "14800"
},
{
"name": "HTML",
"bytes": "8734"
},
{
"name": "JavaScript",
"bytes": "19797"
},
{
"name": "Python",
"bytes": "2362"
},
{
"name": "Shell",
"bytes": "11661"
}
],
"symlink_target": ""
}
|
from django.test import TestCase, Client
from django.http import HttpRequest, Http404
from wagtail.wagtailcore.models import Page, Site
from wagtail.tests.models import EventPage
class TestRouting(TestCase):
fixtures = ['test.json']
def test_find_site_for_request(self):
default_site = Site.objects.get(is_default_site=True)
events_page = Page.objects.get(url_path='/home/events/')
events_site = Site.objects.create(hostname='events.example.com', root_page=events_page)
# requests without a Host: header should be directed to the default site
request = HttpRequest()
request.path = '/'
self.assertEqual(Site.find_for_request(request), default_site)
# requests with a known Host: header should be directed to the specific site
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = 'events.example.com'
self.assertEqual(Site.find_for_request(request), events_site)
# requests with an unrecognised Host: header should be directed to the default site
request = HttpRequest()
request.path = '/'
request.META['HTTP_HOST'] = 'unknown.example.com'
self.assertEqual(Site.find_for_request(request), default_site)
def test_urls(self):
default_site = Site.objects.get(is_default_site=True)
homepage = Page.objects.get(url_path='/home/')
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
# Basic installation only has one site configured, so page.url will return local URLs
self.assertEqual(homepage.full_url, 'http://localhost/')
self.assertEqual(homepage.url, '/')
self.assertEqual(homepage.relative_url(default_site), '/')
self.assertEqual(christmas_page.full_url, 'http://localhost/events/christmas/')
self.assertEqual(christmas_page.url, '/events/christmas/')
self.assertEqual(christmas_page.relative_url(default_site), '/events/christmas/')
def test_urls_with_multiple_sites(self):
events_page = Page.objects.get(url_path='/home/events/')
events_site = Site.objects.create(hostname='events.example.com', root_page=events_page)
default_site = Site.objects.get(is_default_site=True)
homepage = Page.objects.get(url_path='/home/')
christmas_page = Page.objects.get(url_path='/home/events/christmas/')
# with multiple sites, page.url will return full URLs to ensure that
# they work across sites
self.assertEqual(homepage.full_url, 'http://localhost/')
self.assertEqual(homepage.url, 'http://localhost/')
self.assertEqual(homepage.relative_url(default_site), '/')
self.assertEqual(homepage.relative_url(events_site), 'http://localhost/')
self.assertEqual(christmas_page.full_url, 'http://events.example.com/christmas/')
self.assertEqual(christmas_page.url, 'http://events.example.com/christmas/')
self.assertEqual(christmas_page.relative_url(default_site), 'http://events.example.com/christmas/')
self.assertEqual(christmas_page.relative_url(events_site), '/christmas/')
def test_request_routing(self):
homepage = Page.objects.get(url_path='/home/')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
request = HttpRequest()
request.path = '/events/christmas/'
response = homepage.route(request, ['events', 'christmas'])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context_data['self'], christmas_page)
used_template = response.resolve_template(response.template_name)
self.assertEqual(used_template.name, 'tests/event_page.html')
def test_route_to_unknown_page_returns_404(self):
homepage = Page.objects.get(url_path='/home/')
request = HttpRequest()
request.path = '/events/quinquagesima/'
with self.assertRaises(Http404):
homepage.route(request, ['events', 'quinquagesima'])
def test_route_to_unpublished_page_returns_404(self):
homepage = Page.objects.get(url_path='/home/')
request = HttpRequest()
request.path = '/events/tentative-unpublished-event/'
with self.assertRaises(Http404):
homepage.route(request, ['events', 'tentative-unpublished-event'])
class TestServeView(TestCase):
fixtures = ['test.json']
def test_serve(self):
c = Client()
response = c.get('/events/christmas/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'tests/event_page.html')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
self.assertEqual(response.context['self'], christmas_page)
self.assertContains(response, '<h1>Christmas</h1>')
self.assertContains(response, '<h2>Event</h2>')
def test_serve_unknown_page_returns_404(self):
c = Client()
response = c.get('/events/quinquagesima/')
self.assertEqual(response.status_code, 404)
def test_serve_unpublished_page_returns_404(self):
c = Client()
response = c.get('/events/tentative-unpublished-event/')
self.assertEqual(response.status_code, 404)
def test_serve_with_multiple_sites(self):
events_page = Page.objects.get(url_path='/home/events/')
events_site = Site.objects.create(hostname='events.example.com', root_page=events_page)
c = Client()
response = c.get('/christmas/', HTTP_HOST='events.example.com')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, 'tests/event_page.html')
christmas_page = EventPage.objects.get(url_path='/home/events/christmas/')
self.assertEqual(response.context['self'], christmas_page)
self.assertContains(response, '<h1>Christmas</h1>')
self.assertContains(response, '<h2>Event</h2>')
# same request to the default host should return a 404
c = Client()
response = c.get('/christmas/', HTTP_HOST='localhost')
self.assertEqual(response.status_code, 404)
|
{
"content_hash": "f238733e6d2cdb687bce23dbf257287f",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 107,
"avg_line_length": 44.30714285714286,
"alnum_prop": 0.667418990810898,
"repo_name": "suziesparkle/wagtail",
"id": "d08bf8ddd5b1fd47de250e3cf0bdb15701963b9b",
"size": "6203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail/wagtailcore/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Orange Canvas Resource Loader
"""
import os
import logging
log = logging.getLogger(__name__)
def package_dirname(package):
"""Return the directory path where package is located.
"""
if isinstance(package, str):
package = __import__(package, fromlist=[""])
filename = package.__file__
dirname = os.path.dirname(filename)
return dirname
def package(qualified_name):
"""Return the enclosing package name where qualified_name is located.
`qualified_name` can be a module inside the package or even an object
inside the module. If a package name itself is provided it is returned.
"""
try:
module = __import__(qualified_name, fromlist=[""])
except ImportError:
# qualified_name could name an object inside a module/package
if "." in qualified_name:
qualified_name, attr_name = qualified_name.rsplit(".", 1)
module = __import__(qualified_name, fromlist=[attr_name])
else:
raise
if module.__package__ is not None:
# the modules enclosing package
return module.__package__
else:
# 'qualified_name' is itself the package
assert(module.__name__ == qualified_name)
return qualified_name
dirname = os.path.abspath(os.path.dirname(__file__))
DEFAULT_SEARCH_PATHS = \
[("", dirname),
("", os.path.join(dirname, "../widgets"))]
del dirname
def default_search_paths():
return DEFAULT_SEARCH_PATHS
def add_default_search_paths(search_paths):
DEFAULT_SEARCH_PATHS.extend(search_paths)
def search_paths_from_description(desc):
"""Return the search paths for the Category/WidgetDescription.
"""
paths = []
if desc.package:
dirname = package_dirname(desc.package)
paths.append(("", dirname))
elif desc.qualified_name:
dirname = package_dirname(package(desc.qualified_name))
paths.append(("", dirname))
if hasattr(desc, "search_paths"):
paths.extend(desc.search_paths)
return paths
class resource_loader(object):
def __init__(self, search_paths=()):
self._search_paths = []
self.add_search_paths(search_paths)
@classmethod
def from_description(cls, desc):
"""Construct an resource from a Widget or Category
description.
"""
paths = search_paths_from_description(desc)
return icon_loader(search_paths=paths)
def add_search_paths(self, paths):
"""Add `paths` to the list of search paths.
"""
self._search_paths.extend(paths)
def search_paths(self):
"""Return a list of all search paths.
"""
return self._search_paths + default_search_paths()
def split_prefix(self, path):
"""Split prefixed path.
"""
if self.is_valid_prefixed(path) and ":" in path:
prefix, path = path.split(":", 1)
else:
prefix = ""
return prefix, path
def is_valid_prefixed(self, path):
i = path.find(":")
return i != 1
def find(self, name):
"""Find a resource matching `name`.
"""
prefix, path = self.split_prefix(name)
if prefix == "" and self.match(path):
return path
elif self.is_valid_prefixed(path):
for pp, search_path in self.search_paths():
if pp == prefix and \
self.match(os.path.join(search_path, path)):
return os.path.join(search_path, path)
return None
def match(self, path):
return os.path.exists(path)
def get(self, name):
return self.load(name)
def load(self, name):
return self.open(name).read()
def open(self, name):
path = self.find(name)
if path is not None:
return open(path, "rb")
else:
raise IOError(2, "Cannot find %r" % name)
import glob
class icon_loader(resource_loader):
DEFAULT_ICON = "icons/Unknown.png"
def match(self, path):
if resource_loader.match(self, path):
return True
return self.is_icon_glob(path)
def icon_glob(self, path):
name, ext = os.path.splitext(path)
pattern = name + "_*" + ext
return glob.glob(pattern)
def is_icon_glob(self, path):
name, ext = os.path.splitext(path)
pattern = name + "_*" + ext
return bool(glob.glob(pattern))
def get(self, name, default=None):
path = self.find(name)
if not path:
path = self.find(self.DEFAULT_ICON if default is None else default)
if not path:
raise IOError(2, "Cannot find %r in %s" %
(name, self.search_paths()))
if self.is_icon_glob(path):
icons = self.icon_glob(path)
else:
icons = [path]
from AnyQt.QtGui import QIcon
icon = QIcon()
for path in icons:
icon.addFile(path)
return icon
def open(self, name):
raise NotImplementedError
def load(self, name):
return self.get(name)
import unittest
class TestIconLoader(unittest.TestCase):
def setUp(self):
from AnyQt.QtWidgets import QApplication
self.app = QApplication([])
def tearDown(self):
self.app.exit()
del self.app
def test_loader(self):
loader = icon_loader()
self.assertEqual(loader.search_paths(), DEFAULT_SEARCH_PATHS)
icon = loader.get("icons/CanvasIcon.png")
self.assertTrue(not icon.isNull())
path = loader.find(":icons/CanvasIcon.png")
self.assertTrue(os.path.isfile(path))
icon = loader.get(":icons/CanvasIcon.png")
self.assertTrue(not icon.isNull())
def test_from_desc(self):
from .registry.description import (
WidgetDescription, CategoryDescription
)
desc = WidgetDescription.from_module(
"Orange.widgets.data.owfile"
)
loader = icon_loader.from_description(desc)
path = loader.find(desc.icon)
self.assertTrue(os.path.isfile(path))
icon = loader.get(desc.icon)
self.assertTrue(not icon.isNull())
desc = CategoryDescription.from_package("Orange.widgets.data")
loader = icon_loader.from_description(desc)
path = loader.find("icons/file.svg")
self.assertTrue(os.path.isfile(path))
icon = loader.get("icons/file.svg")
self.assertTrue(not icon.isNull())
def test_package_reflection(self):
from Orange.widgets.data import owfile
from Orange.widgets import data
package_name = data.__name__
p1 = package("Orange.widgets.data.owfile.OWFile")
self.assertEqual(p1, package_name)
p2 = package("Orange.widgets.data.owfile")
self.assertEqual(p2, package_name)
p3 = package("Orange.widgets.data")
self.assertEqual(p3, package_name)
p4 = package(owfile.__name__)
self.assertEqual(p4, package_name)
dirname = package_dirname(package_name)
self.assertEqual(dirname, os.path.dirname(data.__file__))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "d81ba42fdfe8d557f9f0fcf3a772f444",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 79,
"avg_line_length": 27.71647509578544,
"alnum_prop": 0.59524467790987,
"repo_name": "cheral/orange3",
"id": "2a415f64c2c3532c94d8507a67ab6c1cc709890d",
"size": "7234",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Orange/canvas/resources.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12023"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20217"
},
{
"name": "Python",
"bytes": "4139574"
},
{
"name": "Shell",
"bytes": "47441"
}
],
"symlink_target": ""
}
|
import _base
from html5lib.constants import voidElements, namespaces, prefixes
from xml.sax.saxutils import escape
# Really crappy basic implementation of a DOM-core like thing
class Node(_base.Node):
type = -1
def __init__(self, name):
self.name = name
self.parent = None
self.value = None
self.childNodes = []
self._flags = []
def __iter__(self):
for node in self.childNodes:
yield node
for item in node:
yield item
def __unicode__(self):
return self.name
def toxml(self):
raise NotImplementedError
def printTree(self, indent=0):
tree = '\n|%s%s' % (' '* indent, unicode(self))
for child in self.childNodes:
tree += child.printTree(indent + 2)
return tree
def appendChild(self, node):
if (isinstance(node, TextNode) and self.childNodes and
isinstance(self.childNodes[-1], TextNode)):
self.childNodes[-1].value += node.value
else:
self.childNodes.append(node)
node.parent = self
def insertText(self, data, insertBefore=None):
if insertBefore is None:
self.appendChild(TextNode(data))
else:
self.insertBefore(TextNode(data), insertBefore)
def insertBefore(self, node, refNode):
index = self.childNodes.index(refNode)
if (isinstance(node, TextNode) and index > 0 and
isinstance(self.childNodes[index - 1], TextNode)):
self.childNodes[index - 1].value += node.value
else:
self.childNodes.insert(index, node)
node.parent = self
def removeChild(self, node):
try:
self.childNodes.remove(node)
except:
# XXX
raise
node.parent = None
def cloneNode(self):
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text"""
return bool(self.childNodes)
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class Document(Node):
type = 1
def __init__(self):
Node.__init__(self, None)
def __unicode__(self):
return "#document"
def appendChild(self, child):
Node.appendChild(self, child)
    def toxml(self, encoding="utf-8"):
result = ""
for child in self.childNodes:
result += child.toxml()
return result.encode(encoding)
def hilite(self, encoding="utf-8"):
result = "<pre>"
for child in self.childNodes:
result += child.hilite()
return result.encode(encoding) + "</pre>"
def printTree(self):
tree = unicode(self)
for child in self.childNodes:
tree += child.printTree(2)
return tree
def cloneNode(self):
return Document()
class DocumentFragment(Document):
type = 2
def __unicode__(self):
return "#document-fragment"
def cloneNode(self):
return DocumentFragment()
class DocumentType(Node):
type = 3
def __init__(self, name, publicId, systemId):
Node.__init__(self, name)
self.publicId = publicId
self.systemId = systemId
def __unicode__(self):
if self.publicId or self.systemId:
publicId = self.publicId or ""
systemId = self.systemId or ""
return """<!DOCTYPE %s "%s" "%s">"""%(
self.name, publicId, systemId)
else:
return u"<!DOCTYPE %s>" % self.name
toxml = __unicode__
def hilite(self):
return '<code class="markup doctype"><!DOCTYPE %s></code>' % self.name
def cloneNode(self):
return DocumentType(self.name, self.publicId, self.systemId)
class TextNode(Node):
type = 4
def __init__(self, value):
Node.__init__(self, None)
self.value = value
def __unicode__(self):
return u"\"%s\"" % self.value
def toxml(self):
return escape(self.value)
hilite = toxml
def cloneNode(self):
return TextNode(self.value)
class Element(Node):
type = 5
def __init__(self, name, namespace=None):
Node.__init__(self, name)
self.namespace = namespace
self.attributes = {}
def __unicode__(self):
if self.namespace == None:
return u"<%s>" % self.name
else:
return u"<%s %s>"%(prefixes[self.namespace], self.name)
def toxml(self):
result = '<' + self.name
if self.attributes:
for name,value in self.attributes.iteritems():
                result += u' %s="%s"' % (name, escape(value, {'"': '&quot;'}))
if self.childNodes:
result += '>'
for child in self.childNodes:
result += child.toxml()
result += u'</%s>' % self.name
else:
result += u'/>'
return result
def hilite(self):
result = '<<code class="markup element-name">%s</code>' % self.name
if self.attributes:
for name, value in self.attributes.iteritems():
                result += ' <code class="markup attribute-name">%s</code>=<code class="markup attribute-value">"%s"</code>' % (name, escape(value, {'"': '&quot;'}))
if self.childNodes:
result += ">"
for child in self.childNodes:
result += child.hilite()
elif self.name in voidElements:
return result + ">"
return result + '</<code class="markup element-name">%s</code>>' % self.name
def printTree(self, indent):
tree = '\n|%s%s' % (' '*indent, unicode(self))
indent += 2
if self.attributes:
for name, value in self.attributes.iteritems():
if isinstance(name, tuple):
name = "%s %s"%(name[0], name[1])
tree += '\n|%s%s="%s"' % (' ' * indent, name, value)
for child in self.childNodes:
tree += child.printTree(indent)
return tree
def cloneNode(self):
newNode = Element(self.name)
if hasattr(self, 'namespace'):
newNode.namespace = self.namespace
for attr, value in self.attributes.iteritems():
newNode.attributes[attr] = value
return newNode
class CommentNode(Node):
type = 6
def __init__(self, data):
Node.__init__(self, None)
self.data = data
def __unicode__(self):
return "<!-- %s -->" % self.data
def toxml(self):
return "<!--%s-->" % self.data
def hilite(self):
return '<code class="markup comment"><!--%s--></code>' % escape(self.data)
def cloneNode(self):
return CommentNode(self.data)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = Element
commentClass = CommentNode
fragmentClass = DocumentFragment
def testSerializer(self, node):
return node.printTree()
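if __name__ == "__main__":
    # Small illustrative demo (not part of html5lib): build a tiny tree by hand
    # and print its test serialization.
    doc = Document()
    html = Element("html")
    html.appendChild(TextNode("hello"))
    doc.appendChild(html)
    print doc.printTree()
    # #document
    # |  <html>
    # |    "hello"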
|
{
"content_hash": "ece11adef3ca71d623a13b8e20c25865",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 163,
"avg_line_length": 29.14516129032258,
"alnum_prop": 0.5556170448256779,
"repo_name": "fergalmoran/Chrome2Kindle",
"id": "ff6bfe400333aa1611323063cadeb58ae0e99a93",
"size": "7228",
"binary": false,
"copies": "26",
"ref": "refs/heads/master",
"path": "server/html5lib/treebuilders/simpletree.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6787"
},
{
"name": "Python",
"bytes": "3048627"
}
],
"symlink_target": ""
}
|
"""
This module provides helpers for decoding crawled HTML with automatic encoding detection.
"""
import chardet
def smart_decode(binary, errors="strict"):
d = chardet.detect(binary)
encoding = d["encoding"]
confidence = d["confidence"]
text = binary.decode(encoding, errors=errors)
return text, encoding, confidence
#--- Unittest ---
if __name__ == "__main__":
    def test_handle_errors():
        utf8_text = "欢迎来到Python-CN。本社区主要讨论Python和Web开发技术。"
        utf8 = utf8_text.encode("utf-8")
        # chardet should detect UTF-8 here and round-trip the text
        text, encoding, confidence = smart_decode(utf8)
        assert text == utf8_text
    test_handle_errors()
|
{
"content_hash": "d2f4013a4992d8d55a76453c99e892ad",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 62,
"avg_line_length": 23.454545454545453,
"alnum_prop": 0.6492248062015504,
"repo_name": "MacHu-GWU/crawl_trulia-project",
"id": "e776531abbebe87d626c489035cb5e2ab0bfbadc",
"size": "599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawl_trulia/packages/crawlib/decoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "516"
},
{
"name": "HTML",
"bytes": "1156764"
},
{
"name": "Python",
"bytes": "45851"
}
],
"symlink_target": ""
}
|
import datetime
# start and end dates (year 2001)
start = datetime.datetime(2001, 1, 1)
end = datetime.datetime(2002, 1, 1)
# do a 4-hour timestep; time step should be specified in minutes. HSPF can use
# any time step from 1 minute to 1 day (1440 minutes) as long as it divides
# evenly into 1440.
tstep = 240
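# illustrative sanity check (not in the original example): 240 minutes gives
# six steps per day, and 1440 % 240 == 0 as HSPF requires
assert 1440 % tstep == 0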
# PyHSPF has a "Watershed" class as a container for information about the
# physical hydrography of the watershed. instances of this class are used to
# generate the HSPFModel class. the following lines illustrate how to use it.
from pyhspf import Watershed
# first the watershed needs to be subdivided into subbasins, which are
# stored in a dictionary where the keys correspond to the names you want
# to give them. the keys must be strings of at most 15 characters for
# consistency with the WDM format. a subbasin consists of a flowplane to
# store information about the landscape, the reach/reservoir that the land
# drains to, and landuse (or other category data) to subdivide the subbasin
# into homogeneous land segments.
from pyhspf import Subbasin
# keep up with the subbasins using the "subbasins" dictionary
subbasins = {}
# call the first subbasin "100"
number = '100'
# create subbasin "100"
subbasin = Subbasin(number)
# subbasins are defined by many attributes, which are grouped into categories
# in PyHSPF Subbasin class instances including:
#
# --the flowplane (length, slope, centroid, average elevation)
#
# --the reach (name, upstream elevation, downstream elevation, length,
# optionally average flow rate and velocity)
#
# --the landuse categories to be used by the model and the corresponding areas
# within each subbasin
#
# --the inlets and outlets from the watershed
# flow plane parameters for the subbasin
length = 100 # m
planeslope = 0.02 # -
elev = 100 # m
centroid = [-90, 40] # lon, lat
# add the flow plane data for subbasin 100
subbasin.add_flowplane(length, planeslope, centroid, elev)
# now let's provide the info about the reach
name = 'dave stream' # something descriptive
maxelev = 110 # elevation at the top of the reach (m)
minelev = 100 # elevation at the bottom of the reach (m)
slopelen = 10 # the reach length (km)
# HSPF uses "FTABLES" to specify the stage-discharge relationship for a reach.
# PyHSPF can estimate the FTABLE using the average flow and velocity, or the
# FTABLE can be specified directly. here the FTABLE for this subbasin reach
# is generated from average flow and velocity.
flow = 10 # the inflow (cfs) must use these units
velocity = 1 # velocity (fps) again must use these units
# add the reach to the subbasin
subbasin.add_reach(name, maxelev, minelev, slopelen, flow = flow,
velocity = velocity)
# here is an alternative set of statements to supply the FTABLE directly.
# An FTABLE consists of 4 columns representing the relationships between
# depth, surface area, volume, and flow for a reach. HSPF does a linear
# interpolation between the depths in the first column to estimate the
# other parameters. Up to 18 rows can be used.
#ftable = [[0,0,0,0],
# [1,1,100,1],
# ]
#subbasin.add_reach(name, maxelev, minelev, slopelen, ftable = ftable)
# another piece of info needed for the subbasins is the land use (used to
# subdivide the subbasins into land segments, e.g. soils). here this subbasin
# is assumed to be 20% developed (with 50% impervious/50% pervious land),
# 40% agriculture, and 40% forest. The areas are in square km, so we get
# 20 km2 developed (half of it impervious by default), 40 km2 agriculture, etc.
#
# variable land use can be provided (e.g., for different years), but it's
# not necessary to change it for HSPF. impervious land must be specified as
# "Developed" to identify it; the relative percentages of developed land can
# be changed from the default of 50% if desired using the "ifraction" keyword.
landuse_names = ['Developed', 'Agriculture', 'Forest']
areas = [20, 40, 40]
subbasin.add_landuse(2001, landuse_names, areas)
# now it's done; let's add the subbasin to the dictionary of subbasins
subbasins[number] = subbasin
# make one more subbasin for this example (note all the parameters are the
# same except these few)
number = '101'
subbasin = Subbasin(number)
# let's just use the same flowplane parameters
subbasin.add_flowplane(length, planeslope, centroid, elev)
# slightly change the reach info
maxelev = 100
minelev = 90
flow = 12
subbasin.add_reach(name, maxelev, minelev, slopelen, flow = flow,
velocity = velocity)
# for simplicity just assume the same landuse types and areas
subbasin.add_landuse(2001, landuse_names, areas)
# and add the subbasin to the subbasins dictionary
subbasins[number] = subbasin
# now that the subbasins are specified it is possible to create an instance
# of the Watershed class that is used to build the HSPF input files.
watershed_name = 'Dave'
watershed = Watershed(watershed_name, subbasins)
# another key piece of information for the watershed is the flow network.
# it should be provided as an "updown" dictionary--that is, subbasin names
# are supplied as keys, and the dictionary returns the downstream subbasin
# names as values. So for this example the network is just subbasin reach "100"
# goes into "101."
updown = {'100':'101'}
# add the mass linkage dictionary to the watershed
watershed.add_mass_linkage(updown)
# need to tell HSPF that subbasin "101" is an outlet where mass leaves. this
# information is needed because PyHSPF starts at the outlet and works
# upstream to build the HSPF model.
watershed.add_outlet('101')
# that is all the physical hydrography data needed by HSPF. the HSPFmodel class
# can be used to build the HSPF files for the simulation (the input and output
# WDM files and the UCI file).
from pyhspf import HSPFModel
# names of the files used in the simulation (the HSPF input and output files
# are generated automatically); can also specify a directory to use elsewhere
filename = 'example01'
# the UCI file generated by PyHSPF is named 'example01.uci' -- look at that
# file to see how the information in this script is translated to HSPF.
# the input and output WDM filenames are generated automatically, and are the
# model filename + '_in.wdm' for the input WDM file and '_out.wdm' for the
# output file (we'll need this later to retrieve results from the files)
wdmoutfile = filename + '_out.wdm'
# let's also generate an optional output file created by HSPF directly
outfile = filename + '.out'
# create an instance of the HSPFModel class
hspfmodel = HSPFModel()
# and build the model from the watershed
hspfmodel.build_from_watershed(watershed, filename, print_file = outfile,
tstep = tstep)
# to run a simulation it is necessary to assign precipitation, potential
# evapotranspiration, and any other time series to the subbasins.
# there are many different ways to estimate the potential evapotranspiration
# including correlation to observed pan evaporation, Penman-Monteith, etc.
# here the potential evapotranspiration is assumed to start at zero, increase
# to 12 mm per day by 7/01, then decrease to zero by 1/01; thus the max 4-hr
# potential evapotranspiration is 2 mm. the following statement will generate
# a time series with these assumptions.
maxET = 2
nsteps = (end-start).days * 1440 // tstep
evaporation = [maxET * (d - datetime.datetime(d.year, 1, 1)).days /
(datetime.datetime(d.year, 7, 1) -
datetime.datetime(d.year, 1, 1)).days
if d.month < 7
else
maxET - maxET * (d - datetime.datetime(d.year, 7, 1)).days /
(datetime.datetime(d.year + 1, 1, 1) -
datetime.datetime(d.year, 7, 1)).days
for d in [start + datetime.timedelta(minutes = tstep) * i
for i in range(nsteps)]
]
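# worked example of the expression above: at 7/01 it peaks at maxET (2 mm per
# 4-hour step), and by the end of the year it has decayed back toward zero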
# specify the time series type
tstype = 'evaporation'
# give the dataset a unique identifier
identifier = 'example_evap'
# finally need the start date, a list of the data, and the time step (min)
hspfmodel.add_timeseries(tstype, identifier, start, evaporation, tstep = tstep)
# now tell HSPF how to use the time series for this model. the unique
# identifier for the time series and the unique subbasin numbers are used
# to make this connection. we will assign this time series to the whole
# watershed, although you can have a unique time series for each subbasin,
# landuse category, or each operation if you want.
hspfmodel.assign_watershed_timeseries(tstype, identifier)
# now add some random rainfall. here it is assumed there is a 5% chance of rain
# every 4-hour period and that the rainfall is an integer between 0 and 20.
import random
# make random numbers for each 4 hour timestep
# if the number is greater than 0.95 (5% chance), let's say it's raining and
# assign a value (this should give about a meter of rain per year)
rainfall = [random.randint(0,20) if random.random() > 0.95 else 0.
for i in range(nsteps)]
# assign the precipitation time series to the file
tstype = 'precipitation'
identifier = 'example_prec'
hspfmodel.add_timeseries(tstype, identifier, start, rainfall, tstep = tstep)
# again connect the time series to all the subbasins in the watershed
hspfmodel.assign_watershed_timeseries(tstype, identifier)
# now add default parameters to the HSPF land segments and reaches to run a
# hydrology-only simulation--similar methods exist to add the parameters for
# other HSPF modules
hspfmodel.add_hydrology()
# and now build the wdm input file using the timeseries
hspfmodel.build_wdminfile()
# the last piece of info that must be specified is the output information from
# the simulation, which is stored in an output WDM file (made automatically
# by PyHSPF). PyHSPF doesn't have every possible HSPF external target variable,
# but the list can be appended pretty easily if needed. the base assumption is
# every time step for fluxes and daily for state variables.
targets = ['reach_outvolume', # the volume that exits each reach at each step
'evaporation', # the evaporation volume in the land segments
'reach_volume', # the volume in the reach
'runoff', # the surface runoff
]
# the targets above each correspond to a particular Fortran variable; the idea
# is to make them more descriptive and easier to add. the targets above
# correspond to:
#
# reach_outvolume = ROVOL
# evaporation = TAET for PERLNDs, and IMPEV for IMPLNDs
# reach_volume = RO
# runoff = SURO, IFWO, and AGWO for PERLNDs, SURO for IMPLNDs
# now the "build_uci" function can be used to build the UCI input file.
# it also builds the output WDM file since it works together with the UCI
# file. this example just does hydrology but flags exist to add air temperature,
# snow, sediment, etc to the simulation assuming the required information has
# been provided.
hspfmodel.build_uci(targets, start, end, hydrology = True, verbose = False)
# now the input files are ready, so run it
hspfmodel.run(verbose = True)
# now "pickle" the hspfmodel for later. "pickling" means writing a python
# object to a file so that it can be accessed later. the concept is to save
# the PyHSPF HSPFModel instance, and forget about the UCI. The UCI file is
# always there if you want to see it, but changing the parameters is much
# easier in Python, and can even be scripted. The "with" statement just closes
# the file where the HSPFModel is stored.
import pickle
with open('hspfmodel', 'wb') as f: pickle.dump(hspfmodel, f)
# assuming that went ok (look at the HSPF-generated .ech and .out files),
# the results can be retrieved using WDMUtil
from pyhspf import WDMUtil
# create an instance of WDMUtil
wdm = WDMUtil()
# open the file for read access
wdm.open(wdmoutfile, 'r')
# pull up the flow at the outlet and plot it along with the precipitation
# and evapotranspiration. the attributes that identify the data are "IDCONS"
# (constituent ID) and "STAID " (station ID). these were assigned by the
# build_wdminfile and build_uci routines automatically; they can be modified
# as needed. the attributes always have six characters so make sure to add
# trailing spaces.
dsns = wdm.get_datasets(wdmoutfile)
idconss = [wdm.get_attribute(wdmoutfile, n, 'IDCONS') for n in dsns]
staids = [wdm.get_attribute(wdmoutfile, n, 'STAID ') for n in dsns]
# uncomment this line to see what's here in the output file
# print(dsns, idconss, staids)
# one HSPF parameter we saved is ROVOL (PyHSPF has a Postprocessor that can
# be used to simplify this, but WDMUtil can also be used more directly).
# The following statement finds the dataset number for the ROVOL timeseries
# for the reach for subbasin 101.
n = [dsn for dsn, idcons, staid in zip(dsns, idconss, staids)
if idcons == 'ROVOL' and staid == '101'][0]
# get the data for the reach volume flux dataset
rovol = wdm.get_data(wdmoutfile, n)
# need to close up the files opened by Fortran
wdm.close(wdmoutfile)
# rovol is the total volume (in Mm3) at each time step, so we need to convert
# it to m3/s. we could have had HSPF do this, but it's nice to keep track of all
# the fluxes for looking at mass balance checks.
flows = [r * 10**6 / 3600 / 4 for r in rovol]
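# worked example of the conversion above: 1 Mm3 leaving the reach in one
# 4-hour step is 10**6 m3 / (4 * 3600 s), i.e. about 69.4 m3/s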
# plot it up right quick with matplotlib using the plotdate method.
from matplotlib import pyplot
# need a list of the dates/times for the plot
times = [start + i * datetime.timedelta(hours = 4)
for i in range(int((end - start).total_seconds() / 3600 / 4))]
# details omitted here, plenty of info elsewhere on matplotlib
fig = pyplot.figure(figsize = (8, 10))
axes = [fig.add_subplot(3, 1, i + 1) for i in range(3)]
axes[0].plot_date(times, rainfall, fmt = 'b-', lw = 0.3)
axes[1].plot_date(times, evaporation, fmt = 'g-', lw = 0.3)
axes[2].plot_date(times, flows, fmt = 'r-', lw = 0.3)
axes[2].set_xlabel('Date', fontsize = 9)
axes[0].set_ylabel('Precipitation (mm)', fontsize = 10, color = 'blue')
axes[1].set_ylabel('Evapotranspiration (mm)', fontsize = 10, color = 'green')
axes[2].set_ylabel('Flow (m\u00B3/s)', fontsize = 10, color = 'red')
for ax in axes: ax.tick_params(axis = 'both', size = 9)
fig.autofmt_xdate(rotation = 25)
# show it
pyplot.show()
# pull open the HSPFModel that was pickled and change some hydrology process
# parameters. the PyHSPF classes have default values for the PERLNDs, IMPLNDs,
# and RCHRESs in the core module. the following lines show how to pull open the
# hspfmodel file, change some parameters, and view the impact on the simulation.
with open('hspfmodel', 'rb') as f: hspfmodel = pickle.load(f)
# change the lower zone storage number from 150 to 50 and the upper zone
# storage number from 10 to 5 for each of the perlnds and see the effects on
# the model output. HSPF parameter names are attached to the perlnd instances;
# they all correspond to 4-6 character Fortran variable names
for p in hspfmodel.perlnds:
p.LZSN = 50
p.UZSN = 5
# now just repeat the run and postprocessing
hspfmodel.build_uci(targets, start, end, hydrology = True, verbose = False)
hspfmodel.run()
wdm.open(wdmoutfile, 'r')
flows = [r * 10**6 / 3600 / 4 for r in wdm.get_data(wdmoutfile, n)]
fig = pyplot.figure(figsize = (8, 10))
axes = [fig.add_subplot(3, 1, i + 1) for i in range(3)]
axes[0].plot_date(times, rainfall, fmt = 'b-', lw = 0.3)
axes[1].plot_date(times, evaporation, fmt = 'g-', lw = 0.3)
axes[2].plot_date(times, flows, fmt = 'r-', lw = 0.3)
axes[2].set_xlabel('Date', fontsize = 9)
axes[0].set_ylabel('Precipitation (mm)', fontsize = 10, color = 'blue')
axes[1].set_ylabel('Evapotranspiration (mm)', fontsize = 10, color = 'green')
axes[2].set_ylabel('Flow (m\u00B3/s)', fontsize = 10, color = 'red')
for ax in axes: ax.tick_params(axis = 'both', size = 9)
fig.autofmt_xdate(rotation = 25)
pyplot.show()
|
{
"content_hash": "dc74ca84d2413b7ede80f8078a274223",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 80,
"avg_line_length": 36.141573033707864,
"alnum_prop": 0.7162842753217683,
"repo_name": "kbrannan/PyHSPF",
"id": "809c0f2a78663cf6fe3f3c2c2b5e46784905e011",
"size": "16730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/introduction/intro01.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "335"
},
{
"name": "C",
"bytes": "7355"
},
{
"name": "C++",
"bytes": "169875"
},
{
"name": "FORTRAN",
"bytes": "3848038"
},
{
"name": "PHP",
"bytes": "25231"
},
{
"name": "Pascal",
"bytes": "457"
},
{
"name": "Python",
"bytes": "1341946"
},
{
"name": "Shell",
"bytes": "4514"
},
{
"name": "SourcePawn",
"bytes": "4265"
}
],
"symlink_target": ""
}
|
import wx
# begin wxGlade: dependencies
import gettext
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class LogonDialog(wx.Dialog):
"""
This class implements the logon dialog for the Pressure Ulcer System GUI.
    It requests the user's id and password and then logs the user into the database.
Methods:
__init__(*args, **kwds) - creates the widgets in the panel and performs initialization
__set_properties() - set various properties of the widgets
__do_layout() - lays out the widgets
__doLogon - Button handler for performing the login
SetDb - Sets the database object
"""
def __init__(self, *args, **kwds):
"""
Creates the widgets in the panel and performs initialization
"""
# begin wxGlade: LogonDialog.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.bitmap_1 = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap("icons\\header-logo.png", wx.BITMAP_TYPE_ANY))
self.mmps_label = wx.StaticText(self, wx.ID_ANY, _("Multi-Modality Portable System for \nPressure Ulcer Prevention and Care"), style=wx.ALIGN_CENTRE)
self.login_label = wx.StaticText(self, wx.ID_ANY, _("Please Log On"), style=wx.ALIGN_RIGHT)
self.username_label = wx.StaticText(self, wx.ID_ANY, _("Username:"))
self.username = wx.TextCtrl(self, wx.ID_ANY, "")
self.password_label = wx.StaticText(self, wx.ID_ANY, _("Password:"))
self.password = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_PASSWORD)
self.error_text = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_READONLY | wx.NO_BORDER)
self.login_button = wx.Button(self, wx.ID_ANY, _("Logon"))
self.cancel_button = wx.Button(self, wx.ID_CANCEL, "")
self.__set_properties()
self.__do_layout()
# end wxGlade
self.login_button.Bind(wx.EVT_BUTTON, self.__doLogon)
def __set_properties(self):
"""
Sets various properties of the widgets
"""
# begin wxGlade: LogonDialog.__set_properties
self.SetTitle(_("Logon"))
self.SetSize(wx.DLG_SZE(self, (476, 218)))
self.mmps_label.SetMinSize((200, 80))
self.mmps_label.SetFont(wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.login_label.SetMinSize((115, 19))
self.login_label.SetFont(wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.username_label.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.username.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.password_label.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.password.SetFont(wx.Font(10, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.error_text.SetBackgroundColour(wx.Colour(240, 240, 240))
self.error_text.SetForegroundColour(wx.Colour(255, 0, 0))
self.error_text.SetFont(wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.NORMAL, 0, ""))
self.login_button.SetDefault()
# end wxGlade
def __do_layout(self):
"""
Lays out the widgets in the frame
"""
# begin wxGlade: LogonDialog.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
grid_sizer_1 = wx.GridSizer(3, 2, 8, 8)
sizer_6 = wx.BoxSizer(wx.VERTICAL)
sizer_6.Add(self.bitmap_1, 0, 0, 0)
sizer_6.Add((20, 20), 0, 0, 0)
sizer_6.Add(self.mmps_label, 0, wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 0)
sizer_5.Add(sizer_6, 1, wx.EXPAND, 0)
grid_sizer_1.Add(self.login_label, 0, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 0)
grid_sizer_1.Add((20, 15), 0, 0, 0)
grid_sizer_1.Add(self.username_label, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.username, 0, 0, 0)
grid_sizer_1.Add(self.password_label, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_1.Add(self.password, 0, 0, 0)
sizer_5.Add(grid_sizer_1, 1, wx.EXPAND, 0)
sizer_1.Add(sizer_5, 1, wx.EXPAND, 0)
sizer_1.Add(self.error_text, 0, wx.EXPAND, 0)
sizer_2.Add(self.login_button, 0, 0, 0)
sizer_2.Add(self.cancel_button, 0, wx.LEFT, 10)
sizer_1.Add(sizer_2, 0, wx.ALL | wx.ALIGN_RIGHT, 5)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def __doLogon(self,event):
"""
Button handler for performing the login.
"""
user = self.username.GetValue()
pw = self.password.GetValue()
(rc,msg) = self.db.Logon(user,pw)
if rc != 0:
print "Error in logon, rc = %d" % rc
print msg
self.error_text.SetValue(str(msg))
else:
self.EndModal(1)
def SetDb(self,db):
"""
Sets the database object.
"""
self.db = db
# end of class LogonDialog
#class LogonDialog(wx.App):
# def OnInit(self):
# wx.InitAllImageHandlers()
# logon_dialog = LogonDialog(None, wx.ID_ANY, "")
# self.SetTopWindow(logon_dialog)
# logon_dialog.Show()
# return 1
# end of class LogonDialog
if __name__ == "__main__":
gettext.install("logonDialog") # replace with the appropriate catalog name
logonDialog = LogonDialog(0)
logonDialog.MainLoop()
|
{
"content_hash": "ff57a083de38a592eb63a2ed493e754c",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 157,
"avg_line_length": 41.17557251908397,
"alnum_prop": 0.6028921023359288,
"repo_name": "VHAINNOVATIONS/GE-Pressure-Ulcer",
"id": "f07ace684f2051ae1053e5ba41b8d5619777b936",
"size": "5528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_gui_decision_support_webportal/python/logon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10761"
},
{
"name": "C++",
"bytes": "1363462"
},
{
"name": "CSS",
"bytes": "172280"
},
{
"name": "JavaScript",
"bytes": "790100"
},
{
"name": "Python",
"bytes": "563688"
},
{
"name": "Shell",
"bytes": "3599"
},
{
"name": "XSLT",
"bytes": "68094"
}
],
"symlink_target": ""
}
|
import json
import flask
from flask import abort, request as req
from optparse import OptionParser
from remote_manager import RemoteControl, PrepareVMManager, GetAllSystemInfo
from system_control_cmd import *
from VMDash_cli import KEEP_STATISTICS
import os  # used below in __main__ to scan for saved '__statistics' files
import time, threading
VMDash = flask.Flask(__name__)
ServerCurrentStatistics = {}
@VMDash.route('/messages', methods = ['POST'])
def get_messages():
global ServerCurrentStatistics
if flask.request.headers['Content-Type'] == 'application/json':
jsonData = flask.request.json
ServerIPAddr = jsonData['myipaddr']
if not ServerCurrentStatistics.has_key(ServerIPAddr):
ServerCurrentStatistics[ServerIPAddr] = []
ServerCurrentStatistics[ServerIPAddr].append(jsonData)
if len(ServerCurrentStatistics[ServerIPAddr]) > KEEP_STATISTICS:
del ServerCurrentStatistics[ServerIPAddr][0]
return ''
    return '415 Unsupported Media Type'
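# illustrative client request for the endpoint above (field names taken from
# this handler and the views below; host and values are made up):
#
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"myipaddr": "192.168.0.10", "update_time": 1400000000, ...}' \
#        http://dashboard-host:5000/messages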
def GetRangePanelType(value):
if value < 50:
return 'panel-primary'
if value < 80:
return 'panel-yellow'
return 'panel-red'
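# illustrative mapping: 30 -> 'panel-primary', 65 -> 'panel-yellow',
# 90 -> 'panel-red'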
def convertTimeFormat(curtime):
return '%04d-%02d-%02d %02d:%02d' % (curtime.tm_year, curtime.tm_mon, curtime.tm_mday, curtime.tm_hour, curtime.tm_min)
class ServerDate:
def __init__(self, integer_time):
self.datetime = time.localtime(integer_time)
def date(self):
return (self.datetime.tm_year, self.datetime.tm_mon, self.datetime.tm_mday)
def readable(self):
return '%02d-%02d' % (self.datetime.tm_mon, self.datetime.tm_mday)
def issamedate(self, integer_time):
testtime = time.localtime(integer_time)
return (self.datetime.tm_year == testtime.tm_year) and (self.datetime.tm_mon == testtime.tm_mon) and (self.datetime.tm_mday == testtime.tm_mday)
def GetPeakUserListWithServer(ServerInfoList):
result = {}
# return [{'date':XXXX, 'users':[]}, {}, ...]
for ServerInfo in ServerInfoList:
if ServerInfo.has_key('update_time'):
dateobj = ServerDate(ServerInfo['update_time'])
result_key = dateobj.readable()
if not result.has_key(result_key):
result[result_key] = []
admin_user = ServerInfo['admin_user']
for userinfo in ServerInfo['users']:
if admin_user != userinfo[0]:
if userinfo[0] not in result[result_key]:
result[result_key].append(userinfo[0])
return result
# result = {'datekey':[], 'datekey':[], ...}
@VMDash.route('/details/<ipaddr>')
@VMDash.route('/details/<ipaddr>/<listindex>')
def details(ipaddr, listindex = None):
global ServerCurrentStatistics
overall = {}
summary = []
cpu_usage_title = []
cpu_usage = []
last_usage_title = []
last_usage = []
last_usage_summary = []
peak_user_list = []
peak_user_title = []
if ServerCurrentStatistics.has_key(ipaddr):
if listindex is None:
listindex = len(ServerCurrentStatistics[ipaddr]) - 1
listindex = int(listindex)
ServerInfo = ServerCurrentStatistics[ipaddr][listindex]
try:
hostname = ServerInfo['platform']['hostname']
osname = ServerInfo['platform']['osname']
kernel = ServerInfo['platform']['kernel']
except:
hostname = 'Unknown'
osname = 'Unknown'
kernel = 'Unknown'
for diskinfo in ServerInfo['disks']:
try:
if diskinfo[5] == '/':
total_disk = diskinfo[1]
percent_disk = int(diskinfo[4][:-1])
break
except:
continue
else:
total_disk = 'Unknown'
percent_disk = 0
cur_url = '/details/%s' % ipaddr
server_list = ServerCurrentStatistics.keys()
server_list.sort()
cur_index = server_list.index(ipaddr)
next_url = '/details/%s' % server_list[(cur_index + 1) % len(server_list)]
prev_url = '/details/%s' % server_list[cur_index - 1]
try:
listindex = int(listindex)
except:
listindex = len(ServerCurrentStatistics[ipaddr]) - 1
prevIndex = max(0, listindex - 1)
nextIndex = min(listindex + 1, len(ServerCurrentStatistics[ipaddr]) - 1)
prevServerInfo = ServerCurrentStatistics[ipaddr][prevIndex]
nextServerInfo = ServerCurrentStatistics[ipaddr][nextIndex]
curtime = convertTimeFormat(time.localtime(ServerInfo['update_time']))
prevtime = convertTimeFormat(time.localtime(prevServerInfo['update_time']))
nexttime = convertTimeFormat(time.localtime(nextServerInfo['update_time']))
curtimeurl = '/details/%s/%d' % (ipaddr, listindex)
nexttimeurl = '/details/%s/%d' % (ipaddr, nextIndex)
prevtimeurl = '/details/%s/%d' % (ipaddr, prevIndex)
overall = {'ipaddr' : ipaddr, 'hostname':hostname, 'osname':osname, 'kernel':kernel,
'uptime':ServerInfo['uptime'], 'core':str(ServerInfo['cpus']['cpus']),
'disk':total_disk, 'admin':ServerInfo['admin_user'],
'cururl':cur_url, 'prevurl':prev_url, 'nexturl':next_url, 'homeurl':'/',
'curtimeurl':curtimeurl, 'prevtimeurl':prevtimeurl, 'nexttimeurl':nexttimeurl,
'curtime':curtime, 'prevtime': prevtime, 'nexttime' : nexttime}
try:
overall['mem'] = '%dMB' % ServerInfo['memory']['total']
except:
overall['mem'] = '0MB'
# CPU Load
#import pprint
#pprint.pprint(ServerInfo)
item = {}
#print ServerInfo['cpu_usage']
cpu_usage = int(ServerInfo['cpu_usage']['used'])
item['title'] = 'CPU'
item['value'] = str(cpu_usage) + '%'
item['level'] = GetRangePanelType(cpu_usage)
summary.append(item)
# Peak User
peak_user_title = ['Date', 'Number', 'Users']
user_info_daily = GetPeakUserListWithServer(ServerCurrentStatistics[ipaddr])
daily_keys = user_info_daily.keys()
daily_keys.sort()
all_users = []
for date_key in daily_keys:
item = {}
item['level'] = 'success'
user_list = []
for user in user_info_daily[date_key]:
user_list.append(user + '@humaxdigital.com')
all_users = list(set(all_users) | set(user_list))
item['list'] = [date_key, len(user_info_daily[date_key]), '; '.join(user_list)]
peak_user_list.append(item)
item = {}
item['level'] = 'danger'
item['list'] = ['ALL', len(all_users), '; '.join(all_users)]
peak_user_list.insert(0, item)
# User
connected_user = 0
userlist = []
for userinfo in ServerInfo['users']:
if ServerInfo['admin_user'] != userinfo[0]:
if userinfo[0] not in userlist:
userlist.append(userinfo[0])
connected_user += 1
item = {}
item['title'] = 'Users(connected/id)'
item['value'] = '%d/%d' % (connected_user, len(userlist))
item['level'] = 'panel-primary'
if connected_user < 1:
item['level'] = 'panel-red'
summary.append(item)
item = {}
item['title'] = 'Disk'
item['value'] = '%d%%' % percent_disk
item['level'] = GetRangePanelType(percent_disk)
summary.append(item)
item = {}
item['title'] = 'Memory'
item['value'] = '%d%%' % ServerInfo['memory']['percent']
item['level'] = GetRangePanelType(ServerInfo['memory']['percent'])
summary.append(item)
cpu_usage_title = ['User', 'CPU(%)', 'MEM(%)', 'VSZ', 'RSS', 'TTY', 'STAT', 'START', 'DUR', 'COMMAND']
cpu_usage = []
color_side = ['danger', 'warning', 'success', 'active']
for index, usage in enumerate(ServerInfo['cpu_usage']['top_20']):
item = {}
item['level'] = color_side[(index / 5) % 4]
item['list'] = [usage[0], usage[2], usage[3], usage[4], usage[5], usage[6], usage[7], usage[8], usage[9], usage[10]]
cpu_usage.append(item)
last_usage_title = ['User', 'TTY', 'IPAddress', 'Time']
last_usage = []
color_side = ['danger', 'warning', 'success', 'active']
for index, usage in enumerate(ServerInfo['last']):
item = {}
if 'still' in usage[3]:
item['level'] = 'danger'
else:
item['level'] = 'active'
item['list'] = [usage[0], usage[1], usage[2], usage[3]]
last_usage.append(item)
skip_users = [ServerInfo['admin_user'], 'reboot', 'root', 'wtmp', ]
user_summary = {}
for usage in ServerInfo['last']:
if usage[0] not in skip_users:
if not user_summary.has_key(usage[0]):
user_summary[usage[0]] = {'flag':False, 'data':''}
if not user_summary[usage[0]]['flag']:
user_summary[usage[0]]['flag'] = True
user_summary[usage[0]]['data'] = usage
elif 'still' in usage[3]:
user_summary[usage[0]]['data'] = usage
user_list = user_summary.keys()
user_list.sort()
for user in user_list:
usage = user_summary[user]['data']
item = {}
if 'still' in usage[3]:
item['level'] = 'success'
else:
item['level'] = 'danger'
item['list'] = [usage[0], usage[1], usage[2], usage[3]]
last_usage_summary.append(item)
return flask.render_template('details.html', overall = overall, summary = summary,
cpu_usage_title = cpu_usage_title, cpu_usage = cpu_usage,
last_usage_title = last_usage_title, last_usage = last_usage, last_usage_summary = last_usage_summary,
peak_user_title = peak_user_title, peak_user_list = peak_user_list)
@VMDash.route('/')
@VMDash.route('/index')
def index():
global ServerCurrentStatistics
headers = ['IP', 'Name', 'CPU', 'MEM', 'DISK', 'Dist', 'User', 'Updated']
ServerIPList = ServerCurrentStatistics.keys()
ServerIPList.sort()
data = []
for serverIP in ServerIPList:
serverInfo = ServerCurrentStatistics[serverIP][-1]
all_data = serverInfo
level = 'active'
ipaddr = serverIP
try:
hostname = all_data['platform']['hostname']
cpu = str(all_data['cpu_usage']['used']) + '% - ' + str(all_data['cpus']['cpus']) + ' cores'
mem = '%dMB(%d%%)' % (all_data['memory']['total'], all_data['memory']['percent'])
for diskinfo in all_data['disks']:
if len(diskinfo) < 6:
disk = 'Undefined'
continue
if diskinfo[5] == '/':
disk = "%s/%s(%s)" % (diskinfo[2], diskinfo[1], diskinfo[4])
percent = int(diskinfo[4][:-1])
if percent > 85:
level = 'danger'
break
else:
disk = 'Undefined'
userlist = []
for userinfo in all_data['users']:
if userinfo[0] not in userlist:
if all_data['admin_user'] != userinfo[0]:
userlist.append(userinfo[0])
if len(userlist) <= 1:
level = 'info'
if len(userlist) > 3:
users = ';'.join(userlist[:3]) + '...(%d)' % len(userlist)
else:
users = ';'.join(userlist)
dist = all_data['platform']['osname']
updated_time = convertTimeFormat(time.localtime(all_data['update_time']))
item = {'list':[ipaddr, hostname, cpu, mem, disk, dist, users, updated_time]}
item['level'] = level
item['url'] = '/details/%s/%d' % (ipaddr, len(ServerCurrentStatistics[serverIP]) - 1)
except:
item = {'list':[ipaddr, 'Unknown', 'Unknown', 'Unknown', 'Unknown', 'Unknown', 'Unknown', 'Unknown']}
item['url'] = '/details/%s/%d' % (ipaddr, len(ServerCurrentStatistics[serverIP]) - 1)
data.append(item)
return flask.render_template('index.html', table_headers = headers, table_data = data)
if __name__ == '__main__':
print "#####################################"
print " Private Cloud Dashboard"
print "#####################################"
# Load Analytic Statistics '__statistics_IPADDR.json'
statistic_files = []
path = './'
for file in os.listdir(path):
full_name = os.path.join(path, file)
if os.path.isfile(full_name) and ('__statistics' in full_name):
statistic_files.append(full_name)
for filename in statistic_files:
new_name = filename.replace('./__statistics_', '')
if not filename.endswith('.json'):
continue
ipaddr = new_name[:-5]
with open(filename, 'r') as f:
newstatis = json.load(f)
ServerCurrentStatistics[ipaddr] = newstatis
VMDash.run(debug = True, host="0.0.0.0", port=5000)
|
{
"content_hash": "731859241d6e00716e9276ea7719c607",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 152,
"avg_line_length": 37.41340782122905,
"alnum_prop": 0.5455427803494102,
"repo_name": "lach76/LinuxHostMonitor",
"id": "6322a8440f1f1df2751e4eb555e6cdb91a517685",
"size": "13417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VMDash.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3485"
},
{
"name": "HTML",
"bytes": "9066"
},
{
"name": "JavaScript",
"bytes": "241043"
},
{
"name": "Python",
"bytes": "33728"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"""Couldn't import Django. Are you sure it's installed and
available on your PYTHONPATH environment variable? Did you
forget to activate a virtual environment?""",
)
raise
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_path)
sys.path.append(os.path.join(current_path, 'apps'))
execute_from_command_line(sys.argv)
|
{
"content_hash": "77546a094c5a43b6e166e92b715d3a9e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 77,
"avg_line_length": 36.5,
"alnum_prop": 0.6238145416227608,
"repo_name": "vchaptsev/cookiecutter-django-vue",
"id": "80a3e3a681b5e65c220afcfe2359088cac26f3a0",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{cookiecutter.project_slug}}/backend/manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1013"
},
{
"name": "HTML",
"bytes": "1139"
},
{
"name": "JavaScript",
"bytes": "9625"
},
{
"name": "Python",
"bytes": "32330"
},
{
"name": "Shell",
"bytes": "1392"
},
{
"name": "Vue",
"bytes": "437"
}
],
"symlink_target": ""
}
|
"""some various utilities and helper classes, most of them used in the
main pylint class
"""
from __future__ import print_function
import codecs
import collections
from inspect import cleandoc
import os
from os.path import dirname, basename, splitext, exists, isdir, join, normpath
import re
import sys
import tokenize
import warnings
import textwrap
import six
from six.moves import zip # pylint: disable=redefined-builtin
from astroid import nodes, Module
from astroid import modutils
from pylint.interfaces import IRawChecker, ITokenChecker, UNDEFINED, implements
from pylint.reporters.ureports.nodes import Section
from pylint.exceptions import InvalidMessageError, UnknownMessageError, EmptyReportError
MSG_TYPES = {
'I' : 'info',
'C' : 'convention',
'R' : 'refactor',
'W' : 'warning',
'E' : 'error',
'F' : 'fatal'
}
MSG_TYPES_LONG = {v: k for k, v in six.iteritems(MSG_TYPES)}
MSG_TYPES_STATUS = {
'I' : 0,
'C' : 16,
'R' : 8,
'W' : 4,
'E' : 2,
'F' : 1
}
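# these values are bit flags: add_message() ORs them into msg_status, so the
# final status encodes every category that fired, e.g. an error plus a
# warning yields 2 | 4 == 6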
_MSG_ORDER = 'EWRCIF'
MSG_STATE_SCOPE_CONFIG = 0
MSG_STATE_SCOPE_MODULE = 1
MSG_STATE_CONFIDENCE = 2
# Allow stopping after the first semicolon encountered,
# so that an option can be continued with the reasons
# why it is active or disabled.
OPTION_RGX = re.compile(r'\s*#.*\bpylint:\s*([^;]+);{0,1}')
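# e.g. '# pylint: disable=missing-docstring; generated module' matches, and
# group 1 captures 'disable=missing-docstring' (the text after the semicolon
# is the human-readable reason and is ignored)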
# The line/node distinction does not apply to fatal errors and reports.
_SCOPE_EXEMPT = 'FR'
class WarningScope(object):
LINE = 'line-based-msg'
NODE = 'node-based-msg'
_MsgBase = collections.namedtuple(
'_MsgBase',
['msg_id', 'symbol', 'msg', 'C', 'category', 'confidence',
'abspath', 'path', 'module', 'obj', 'line', 'column'])
class Message(_MsgBase):
"""This class represent a message to be issued by the reporters"""
def __new__(cls, msg_id, symbol, location, msg, confidence):
return _MsgBase.__new__(
cls, msg_id, symbol, msg, msg_id[0], MSG_TYPES[msg_id[0]],
confidence, *location)
def format(self, template):
"""Format the message according to the given template.
        The template format is the one of the format method:
cf. http://docs.python.org/2/library/string.html#formatstrings
"""
# For some reason, _asdict on derived namedtuples does not work with
# Python 3.4. Needs some investigation.
return template.format(**dict(zip(self._fields, self)))
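    # illustrative template (any subset of the _MsgBase fields may be used):
    #   msg.format('{path}:{line}: [{msg_id}({symbol})] {msg}')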
def get_module_and_frameid(node):
"""return the module name and the frame id in the module"""
frame = node.frame()
module, obj = '', []
while frame:
if isinstance(frame, Module):
module = frame.name
else:
obj.append(getattr(frame, 'name', '<lambda>'))
try:
frame = frame.parent.frame()
except AttributeError:
frame = None
obj.reverse()
return module, '.'.join(obj)
def category_id(cid):
cid = cid.upper()
if cid in MSG_TYPES:
return cid
return MSG_TYPES_LONG.get(cid)
def safe_decode(line, encoding, *args, **kwargs):
'''return decoded line from encoding or decode with default encoding'''
try:
return line.decode(encoding or sys.getdefaultencoding(), *args, **kwargs)
except LookupError:
return line.decode(sys.getdefaultencoding(), *args, **kwargs)
def decoding_stream(stream, encoding, errors='strict'):
try:
reader_cls = codecs.getreader(encoding or sys.getdefaultencoding())
except LookupError:
reader_cls = codecs.getreader(sys.getdefaultencoding())
return reader_cls(stream, errors)
def _decoding_readline(stream, encoding):
'''return lambda function for tokenize with safe decode'''
return decoding_stream(stream, encoding, errors='replace').readline
def tokenize_module(module):
with module.stream() as stream:
readline = stream.readline
if sys.version_info < (3, 0):
if module.file_encoding is not None:
readline = _decoding_readline(stream, module.file_encoding)
return list(tokenize.generate_tokens(readline))
return list(tokenize.tokenize(readline))
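# an illustrative ``checker.msgs`` entry, in the (msg, symbol, description)
# form unpacked below:
#   {'W0101': ('Unreachable code', 'unreachable',
#              'Used when there is some code behind a "return" or "raise" '
#              'statement, which will never be accessed.')}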
def build_message_def(checker, msgid, msg_tuple):
if implements(checker, (IRawChecker, ITokenChecker)):
default_scope = WarningScope.LINE
else:
default_scope = WarningScope.NODE
options = {}
if len(msg_tuple) > 3:
(msg, symbol, descr, options) = msg_tuple
elif len(msg_tuple) > 2:
(msg, symbol, descr) = msg_tuple
else:
# messages should have a symbol, but for backward compatibility
# they may not.
(msg, descr) = msg_tuple
warnings.warn("[pylint 0.26] description of message %s doesn't include "
"a symbolic name" % msgid, DeprecationWarning)
symbol = None
options.setdefault('scope', default_scope)
return MessageDefinition(checker, msgid, msg, descr, symbol, **options)
class MessageDefinition(object):
def __init__(self, checker, msgid, msg, descr, symbol, scope,
minversion=None, maxversion=None, old_names=None):
self.checker = checker
if len(msgid) != 5:
raise InvalidMessageError('Invalid message id %r' % msgid)
if not msgid[0] in MSG_TYPES:
raise InvalidMessageError(
'Bad message type %s in %r' % (msgid[0], msgid))
self.msgid = msgid
self.msg = msg
self.descr = descr
self.symbol = symbol
self.scope = scope
self.minversion = minversion
self.maxversion = maxversion
self.old_names = old_names or []
def may_be_emitted(self):
"""return True if message may be emitted using the current interpreter"""
if self.minversion is not None and self.minversion > sys.version_info:
return False
if self.maxversion is not None and self.maxversion <= sys.version_info:
return False
return True
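    # e.g. a message defined with minversion=(3, 0) is never emitted when
    # running under Python 2, and one with maxversion=(3, 0) is only emitted
    # under Python 2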
def format_help(self, checkerref=False):
"""return the help string for the given message id"""
desc = self.descr
if checkerref:
desc += ' This message belongs to the %s checker.' % \
self.checker.name
title = self.msg
if self.symbol:
msgid = '%s (%s)' % (self.symbol, self.msgid)
else:
msgid = self.msgid
if self.minversion or self.maxversion:
restr = []
if self.minversion:
restr.append('< %s' % '.'.join([str(n) for n in self.minversion]))
if self.maxversion:
restr.append('>= %s' % '.'.join([str(n) for n in self.maxversion]))
restr = ' or '.join(restr)
if checkerref:
desc += " It can't be emitted when using Python %s." % restr
else:
desc += " This message can't be emitted when using Python %s." % restr
desc = _normalize_text(' '.join(desc.split()), indent=' ')
if title != '%s':
title = title.splitlines()[0]
return ':%s: *%s*\n%s' % (msgid, title, desc)
return ':%s:\n%s' % (msgid, desc)
class MessagesHandlerMixIn(object):
"""a mix-in class containing all the messages related methods for the main
lint class
"""
def __init__(self):
self._msgs_state = {}
self.msg_status = 0
def _checker_messages(self, checker):
for known_checker in self._checkers[checker.lower()]:
for msgid in known_checker.msgs:
yield msgid
def disable(self, msgid, scope='package', line=None, ignore_unknown=False):
"""don't output message of the given id"""
self._set_msg_status(msgid, enable=False, scope=scope,
line=line, ignore_unknown=ignore_unknown)
def enable(self, msgid, scope='package', line=None, ignore_unknown=False):
"""reenable message of the given id"""
self._set_msg_status(msgid, enable=True, scope=scope,
line=line, ignore_unknown=ignore_unknown)
def _set_msg_status(self, msgid, enable, scope='package', line=None, ignore_unknown=False):
assert scope in ('package', 'module')
if msgid == 'all':
for _msgid in MSG_TYPES:
self._set_msg_status(_msgid, enable, scope, line, ignore_unknown)
if enable and not self._python3_porting_mode:
# Don't activate the python 3 porting checker if it wasn't activated explicitly.
self.disable('python3')
return
# msgid is a category?
catid = category_id(msgid)
if catid is not None:
for _msgid in self.msgs_store._msgs_by_category.get(catid):
self._set_msg_status(_msgid, enable, scope, line)
return
# msgid is a checker name?
if msgid.lower() in self._checkers:
msgs_store = self.msgs_store
for checker in self._checkers[msgid.lower()]:
for _msgid in checker.msgs:
if _msgid in msgs_store._alternative_names:
self._set_msg_status(_msgid, enable, scope, line)
return
# msgid is report id?
if msgid.lower().startswith('rp'):
if enable:
self.enable_report(msgid)
else:
self.disable_report(msgid)
return
try:
# msgid is a symbolic or numeric msgid.
msg = self.msgs_store.check_message_id(msgid)
except UnknownMessageError:
if ignore_unknown:
return
raise
if scope == 'module':
self.file_state.set_msg_status(msg, line, enable)
if enable:
self.add_message('locally-enabled', line=line,
args=(msg.symbol, msg.msgid))
elif msg.symbol != 'locally-disabled':
self.add_message('locally-disabled', line=line,
args=(msg.symbol, msg.msgid))
else:
msgs = self._msgs_state
msgs[msg.msgid] = enable
# sync configuration object
self.config.enable = [self._message_symbol(mid) for mid, val
in sorted(six.iteritems(msgs)) if val]
self.config.disable = [self._message_symbol(mid) for mid, val
in sorted(six.iteritems(msgs)) if not val]
def _message_symbol(self, msgid):
"""Get the message symbol of the given message id
Return the original message id if the message does not
exist.
"""
try:
return self.msgs_store.check_message_id(msgid).symbol
except UnknownMessageError:
return msgid
def get_message_state_scope(self, msgid, line=None, confidence=UNDEFINED):
"""Returns the scope at which a message was enabled/disabled."""
if self.config.confidence and confidence.name not in self.config.confidence:
return MSG_STATE_CONFIDENCE
try:
if line in self.file_state._module_msgs_state[msgid]:
return MSG_STATE_SCOPE_MODULE
except (KeyError, TypeError):
return MSG_STATE_SCOPE_CONFIG
return None
def is_message_enabled(self, msg_descr, line=None, confidence=None):
"""return true if the message associated to the given message id is
enabled
msgid may be either a numeric or symbolic message id.
"""
if self.config.confidence and confidence:
if confidence.name not in self.config.confidence:
return False
try:
msgid = self.msgs_store.check_message_id(msg_descr).msgid
except UnknownMessageError:
# The linter checks for messages that are not registered
# due to version mismatch, just treat them as message IDs
# for now.
msgid = msg_descr
if line is None:
return self._msgs_state.get(msgid, True)
try:
return self.file_state._module_msgs_state[msgid][line]
except KeyError:
return self._msgs_state.get(msgid, True)
def add_message(self, msg_descr, line=None, node=None, args=None, confidence=UNDEFINED):
"""Adds a message given by ID or name.
        If provided, the message string is expanded using args.
        AST checkers must provide the node argument (but may optionally
provide line if the line number is different), raw and token checkers
must provide the line argument.
"""
msg_info = self.msgs_store.check_message_id(msg_descr)
msgid = msg_info.msgid
# backward compatibility, message may not have a symbol
symbol = msg_info.symbol or msgid
# Fatal messages and reports are special, the node/scope distinction
# does not apply to them.
if msgid[0] not in _SCOPE_EXEMPT:
if msg_info.scope == WarningScope.LINE:
if line is None:
raise InvalidMessageError(
'Message %s must provide line, got None' % msgid)
if node is not None:
raise InvalidMessageError(
'Message %s must only provide line, '
'got line=%s, node=%s' % (msgid, line, node))
elif msg_info.scope == WarningScope.NODE:
# Node-based warnings may provide an override line.
if node is None:
raise InvalidMessageError(
'Message %s must provide Node, got None' % msgid)
if line is None and node is not None:
line = node.fromlineno
if hasattr(node, 'col_offset'):
col_offset = node.col_offset # XXX measured in bytes for utf-8, divide by two for chars?
else:
col_offset = None
# should this message be displayed
if not self.is_message_enabled(msgid, line, confidence):
self.file_state.handle_ignored_message(
self.get_message_state_scope(msgid, line, confidence),
msgid, line, node, args, confidence)
return
# update stats
msg_cat = MSG_TYPES[msgid[0]]
self.msg_status |= MSG_TYPES_STATUS[msgid[0]]
self.stats[msg_cat] += 1
self.stats['by_module'][self.current_name][msg_cat] += 1
try:
self.stats['by_msg'][symbol] += 1
except KeyError:
self.stats['by_msg'][symbol] = 1
# expand message ?
msg = msg_info.msg
if args:
msg %= args
# get module and object
if node is None:
module, obj = self.current_name, ''
abspath = self.current_file
else:
module, obj = get_module_and_frameid(node)
abspath = node.root().file
path = abspath.replace(self.reporter.path_strip_prefix, '')
# add the message
self.reporter.handle_message(
Message(msgid, symbol,
(abspath, path, module, obj, line or 1, col_offset or 0), msg, confidence))
def print_full_documentation(self, stream=None):
"""output a full documentation in ReST format"""
if not stream:
stream = sys.stdout
print("Pylint global options and switches", file=stream)
print("----------------------------------", file=stream)
print("", file=stream)
print("Pylint provides global options and switches.", file=stream)
print("", file=stream)
by_checker = {}
for checker in self.get_checkers():
if checker.name == 'master':
if checker.options:
for section, options in checker.options_by_section():
if section is None:
title = 'General options'
else:
title = '%s options' % section.capitalize()
print(title, file=stream)
print('~' * len(title), file=stream)
_rest_format_section(stream, None, options)
print("", file=stream)
else:
name = checker.name
try:
by_checker[name]['options'] += checker.options_and_values()
by_checker[name]['msgs'].update(checker.msgs)
by_checker[name]['reports'] += checker.reports
except KeyError:
by_checker[name] = {
'options': list(checker.options_and_values()),
'msgs': dict(checker.msgs),
'reports': list(checker.reports),
}
print("Pylint checkers' options and switches", file=stream)
print("-------------------------------------", file=stream)
print("", file=stream)
print("Pylint checkers can provide three set of features:", file=stream)
print("", file=stream)
print("* options that control their execution,", file=stream)
print("* messages that they can raise,", file=stream)
print("* reports that they can generate.", file=stream)
print("", file=stream)
print("Below is a list of all checkers and their features.", file=stream)
print("", file=stream)
for checker, info in sorted(six.iteritems(by_checker)):
self._print_checker_doc(checker, info, stream=stream)
@staticmethod
def _print_checker_doc(checker_name, info, stream=None):
"""Helper method for print_full_documentation.
Also used by doc/exts/pylint_extensions.py.
"""
if not stream:
stream = sys.stdout
doc = info.get('doc')
module = info.get('module')
msgs = info.get('msgs')
options = info.get('options')
reports = info.get('reports')
title = '%s checker' % (checker_name.replace("_", " ").title())
if module:
# Provide anchor to link against
print(".. _%s:\n" % module, file=stream)
print(title, file=stream)
print('~' * len(title), file=stream)
print("", file=stream)
if module:
print("This checker is provided by ``%s``." % module, file=stream)
print("Verbatim name of the checker is ``%s``." % checker_name, file=stream)
print("", file=stream)
if doc:
title = 'Documentation'
print(title, file=stream)
print('^' * len(title), file=stream)
print(cleandoc(doc), file=stream)
print("", file=stream)
if options:
title = 'Options'
print(title, file=stream)
print('^' * len(title), file=stream)
_rest_format_section(stream, None, options)
print("", file=stream)
if msgs:
title = 'Messages'
print(title, file=stream)
print('^' * len(title), file=stream)
for msgid, msg in sorted(six.iteritems(msgs),
key=lambda kv: (_MSG_ORDER.index(kv[0][0]), kv[1])):
msg = build_message_def(checker_name, msgid, msg)
print(msg.format_help(checkerref=False), file=stream)
print("", file=stream)
if reports:
title = 'Reports'
print(title, file=stream)
print('^' * len(title), file=stream)
for report in reports:
print(':%s: %s' % report[:2], file=stream)
print("", file=stream)
print("", file=stream)
class FileState(object):
"""Hold internal state specific to the currently analyzed file"""
def __init__(self, modname=None):
self.base_name = modname
self._module_msgs_state = {}
self._raw_module_msgs_state = {}
self._ignored_msgs = collections.defaultdict(set)
self._suppression_mapping = {}
def collect_block_lines(self, msgs_store, module_node):
"""Walk the AST to collect block level options line numbers."""
for msg, lines in six.iteritems(self._module_msgs_state):
self._raw_module_msgs_state[msg] = lines.copy()
orig_state = self._module_msgs_state.copy()
self._module_msgs_state = {}
self._suppression_mapping = {}
self._collect_block_lines(msgs_store, module_node, orig_state)
def _collect_block_lines(self, msgs_store, node, msg_state):
"""Recursively walk (depth first) AST to collect block level options
line numbers.
"""
for child in node.get_children():
self._collect_block_lines(msgs_store, child, msg_state)
first = node.fromlineno
last = node.tolineno
# first child line number used to distinguish between disable
# which are the first child of scoped node with those defined later.
# For instance in the code below:
#
# 1. def meth8(self):
# 2. """test late disabling"""
# 3. # pylint: disable=E1102
# 4. print self.blip
# 5. # pylint: disable=E1101
# 6. print self.bla
#
# E1102 should be disabled from line 1 to 6 while E1101 from line 5 to 6
#
# this is necessary to disable locally messages applying to class /
# function using their fromlineno
if (isinstance(node, (nodes.Module, nodes.ClassDef, nodes.FunctionDef))
and node.body):
firstchildlineno = node.body[0].fromlineno
else:
firstchildlineno = last
for msgid, lines in six.iteritems(msg_state):
for lineno, state in list(lines.items()):
original_lineno = lineno
if first > lineno or last < lineno:
continue
# Set state for all lines for this block, if the
# warning is applied to nodes.
if msgs_store.check_message_id(msgid).scope == WarningScope.NODE:
if lineno > firstchildlineno:
state = True
first_, last_ = node.block_range(lineno)
else:
first_ = lineno
last_ = last
for line in range(first_, last_+1):
# do not override existing entries
if line in self._module_msgs_state.get(msgid, ()):
continue
if line in lines: # state change in the same block
state = lines[line]
original_lineno = line
if not state:
self._suppression_mapping[(msgid, line)] = original_lineno
try:
self._module_msgs_state[msgid][line] = state
except KeyError:
self._module_msgs_state[msgid] = {line: state}
del lines[lineno]
def set_msg_status(self, msg, line, status):
"""Set status (enabled/disable) for a given message at a given line"""
assert line > 0
try:
self._module_msgs_state[msg.msgid][line] = status
except KeyError:
self._module_msgs_state[msg.msgid] = {line: status}
def handle_ignored_message(self, state_scope, msgid, line,
node, args, confidence): # pylint: disable=unused-argument
"""Report an ignored message.
state_scope is either MSG_STATE_SCOPE_MODULE or MSG_STATE_SCOPE_CONFIG,
depending on whether the message was disabled locally in the module,
or globally. The other arguments are the same as for add_message.
"""
if state_scope == MSG_STATE_SCOPE_MODULE:
try:
orig_line = self._suppression_mapping[(msgid, line)]
self._ignored_msgs[(msgid, orig_line)].add(line)
except KeyError:
pass
def iter_spurious_suppression_messages(self, msgs_store):
for warning, lines in six.iteritems(self._raw_module_msgs_state):
for line, enable in six.iteritems(lines):
if not enable and (warning, line) not in self._ignored_msgs:
yield 'useless-suppression', line, \
(msgs_store.get_msg_display_string(warning),)
# don't use iteritems here, _ignored_msgs may be modified by add_message
for (warning, from_), lines in list(self._ignored_msgs.items()):
for line in lines:
yield 'suppressed-message', line, \
(msgs_store.get_msg_display_string(warning), from_)
class MessagesStore(object):
"""The messages store knows information about every possible message but has
no particular state during analysis.
"""
def __init__(self):
# Primary registry for all active messages (i.e. all messages
# that can be emitted by pylint for the underlying Python
# version). It contains the 1:1 mapping from symbolic names
# to message definition objects.
self._messages = {}
# Maps alternative names (numeric IDs, deprecated names) to
# message definitions. May contain several names for each definition
# object.
self._alternative_names = {}
self._msgs_by_category = collections.defaultdict(list)
@property
def messages(self):
"""The list of all active messages."""
return six.itervalues(self._messages)
def add_renamed_message(self, old_id, old_symbol, new_symbol):
"""Register the old ID and symbol for a warning that was renamed.
This allows users to keep using the old ID/symbol in suppressions.
"""
msg = self.check_message_id(new_symbol)
msg.old_names.append((old_id, old_symbol))
self._register_alternative_name(msg, old_id)
self._register_alternative_name(msg, old_symbol)
def register_messages(self, checker):
"""register a dictionary of messages
        Keys are message ids, values are message tuples in one of the forms
        unpacked by build_message_def: (msg, descr), (msg, symbol, descr) or
        (msg, symbol, descr, options).
        Message ids should be strings of length 5: a message-type letter
        followed by four digits, the first two identifying the checker and
        the last two the message within that checker.
"""
chkid = None
for msgid, msg_tuple in sorted(six.iteritems(checker.msgs)):
msg = build_message_def(checker, msgid, msg_tuple)
# avoid duplicate / malformed ids
if msg.symbol in self._messages or msg.symbol in self._alternative_names:
raise InvalidMessageError(
'Message symbol %r is already defined' % msg.symbol)
if chkid is not None and chkid != msg.msgid[1:3]:
raise InvalidMessageError(
"Inconsistent checker part in message id %r (expected 'x%sxx')"
% (msgid, chkid))
chkid = msg.msgid[1:3]
self._messages[msg.symbol] = msg
self._register_alternative_name(msg, msg.msgid)
for old_id, old_symbol in msg.old_names:
self._register_alternative_name(msg, old_id)
self._register_alternative_name(msg, old_symbol)
self._msgs_by_category[msg.msgid[0]].append(msg.msgid)
def _register_alternative_name(self, msg, name):
"""helper for register_message()"""
if name in self._messages and self._messages[name] != msg:
raise InvalidMessageError(
'Message symbol %r is already defined' % name)
if name in self._alternative_names and self._alternative_names[name] != msg:
raise InvalidMessageError(
'Message %s %r is already defined' % (
'id' if len(name) == 5 and name[0] in MSG_TYPES else 'alternate name',
name))
self._alternative_names[name] = msg
def check_message_id(self, msgid):
"""returns the Message object for this message.
msgid may be either a numeric or symbolic id.
Raises UnknownMessageError if the message id is not defined.
"""
if msgid[1:].isdigit():
msgid = msgid.upper()
for source in (self._alternative_names, self._messages):
try:
return source[msgid]
except KeyError:
pass
raise UnknownMessageError('No such message id %s' % msgid)
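    # Illustrative sketch (editor's addition): numeric and symbolic forms
    # resolve to the same definition, and numeric ids are upper-cased first.
    # The pair below reuses the hypothetical 'W1234'/'my-symbol' from above.
    #
    #     assert store.check_message_id('w1234') is \
    #         store.check_message_id('my-symbol')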
def get_msg_display_string(self, msgid):
"""Generates a user-consumable representation of a message.
Can be just the message ID or the ID and the symbol.
"""
return repr(self.check_message_id(msgid).symbol)
def help_message(self, msgids):
"""display help messages for the given message identifiers"""
for msgid in msgids:
try:
print(self.check_message_id(msgid).format_help(checkerref=True))
print("")
except UnknownMessageError as ex:
print(ex)
print("")
continue
def list_messages(self):
"""output full messages list documentation in ReST format"""
msgs = sorted(six.itervalues(self._messages), key=lambda msg: msg.msgid)
for msg in msgs:
if not msg.may_be_emitted():
continue
print(msg.format_help(checkerref=False))
print("")
class ReportsHandlerMixIn(object):
"""a mix-in class containing all the reports and stats manipulation
related methods for the main lint class
"""
def __init__(self):
self._reports = collections.defaultdict(list)
self._reports_state = {}
def report_order(self):
""" Return a list of reports, sorted in the order
in which they must be called.
"""
return list(self._reports)
def register_report(self, reportid, r_title, r_cb, checker):
"""register a report
reportid is the unique identifier for the report
r_title the report's title
r_cb the method to call to make the report
checker is the checker defining the report
"""
reportid = reportid.upper()
self._reports[checker].append((reportid, r_title, r_cb))
def enable_report(self, reportid):
"""disable the report of the given id"""
reportid = reportid.upper()
self._reports_state[reportid] = True
def disable_report(self, reportid):
"""disable the report of the given id"""
reportid = reportid.upper()
self._reports_state[reportid] = False
def report_is_enabled(self, reportid):
"""return true if the report associated to the given identifier is
enabled
"""
return self._reports_state.get(reportid, True)
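    # Illustrative sketch (editor's addition): report ids are normalized to
    # upper case, so enabling and disabling are case-insensitive. 'RP0001'
    # and make_my_report are hypothetical.
    #
    #     self.register_report('RP0001', 'My report', make_my_report, checker)
    #     self.disable_report('rp0001')
    #     assert not self.report_is_enabled('RP0001')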
def make_reports(self, stats, old_stats):
"""render registered reports"""
sect = Section('Report',
                       '%s statements analysed.' % (self.stats['statement']))
for checker in self.report_order():
for reportid, r_title, r_cb in self._reports[checker]:
if not self.report_is_enabled(reportid):
continue
report_sect = Section(r_title)
try:
r_cb(report_sect, stats, old_stats)
except EmptyReportError:
continue
report_sect.report_id = reportid
sect.append(report_sect)
return sect
def add_stats(self, **kwargs):
"""add some stats entries to the statistic dictionary
raise an AssertionError if there is a key conflict
"""
for key, value in six.iteritems(kwargs):
if key[-1] == '_':
key = key[:-1]
assert key not in self.stats
self.stats[key] = value
return self.stats
def _basename_in_blacklist_re(base_name, black_list_re):
"""Determines if the basename is matched in a regex blacklist
:param str base_name: The basename of the file
:param list black_list_re: A collection of regex patterns to match against.
Successful matches are blacklisted.
:returns: `True` if the basename is blacklisted, `False` otherwise.
:rtype: bool
"""
for file_pattern in black_list_re:
if file_pattern.match(base_name):
return True
return False
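# Illustrative sketch (editor's addition): the blacklist entries must be
# pre-compiled patterns, since ``match`` is called on each one. The patterns
# below are hypothetical.
#
#     import re
#     black_list_re = [re.compile(r'.*_pb2\.py$'), re.compile(r'^test_')]
#     _basename_in_blacklist_re('foo_pb2.py', black_list_re)   # -> True
#     _basename_in_blacklist_re('module.py', black_list_re)    # -> False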
def _modpath_from_file(filename, is_namespace):
def _is_package_cb(path, parts):
return modutils.check_modpath_has_init(path, parts) or is_namespace
return modutils.modpath_from_file_with_callback(filename, is_package_cb=_is_package_cb)
def expand_modules(files_or_modules, black_list, black_list_re):
"""take a list of files/modules/packages and return the list of tuple
(file, module name) which have to be actually checked
"""
result = []
errors = []
for something in files_or_modules:
if os.path.basename(something) in black_list:
continue
if _basename_in_blacklist_re(os.path.basename(something), black_list_re):
continue
if exists(something):
# this is a file or a directory
try:
modname = '.'.join(modutils.modpath_from_file(something))
except ImportError:
modname = splitext(basename(something))[0]
if isdir(something):
filepath = join(something, '__init__.py')
else:
filepath = something
else:
# suppose it's a module or package
modname = something
try:
filepath = modutils.file_from_modpath(modname.split('.'))
if filepath is None:
continue
except (ImportError, SyntaxError) as ex:
# FIXME p3k : the SyntaxError is a Python bug and should be
# removed as soon as possible http://bugs.python.org/issue10588
errors.append({'key': 'fatal', 'mod': modname, 'ex': ex})
continue
filepath = normpath(filepath)
modparts = (modname or something).split('.')
try:
spec = modutils.file_info_from_modpath(modparts, path=sys.path)
except ImportError:
# Might not be acceptable, don't crash.
is_namespace = False
is_directory = isdir(something)
else:
is_namespace = modutils.is_namespace(spec)
is_directory = modutils.is_directory(spec)
if not is_namespace:
result.append({'path': filepath, 'name': modname, 'isarg': True,
'basepath': filepath, 'basename': modname})
has_init = (not (modname.endswith('.__init__') or modname == '__init__')
and basename(filepath) == '__init__.py')
if has_init or is_namespace or is_directory:
for subfilepath in modutils.get_module_files(dirname(filepath), black_list,
list_all=is_namespace):
if filepath == subfilepath:
continue
if _basename_in_blacklist_re(basename(subfilepath), black_list_re):
continue
modpath = _modpath_from_file(subfilepath, is_namespace)
submodname = '.'.join(modpath)
result.append({'path': subfilepath, 'name': submodname,
'isarg': False,
'basepath': filepath, 'basename': modname})
return result, errors
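# Illustrative sketch (editor's addition): each result entry is a dict
# describing one module to check; the package layout below is hypothetical.
#
#     result, errors = expand_modules(['mypkg'], black_list=['CVS'],
#                                     black_list_re=[])
#     # result[0] -> {'path': 'mypkg/__init__.py', 'name': 'mypkg',
#     #               'isarg': True, 'basepath': 'mypkg/__init__.py',
#     #               'basename': 'mypkg'}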
class PyLintASTWalker(object):
def __init__(self, linter):
# callbacks per node types
self.nbstatements = 0
self.visit_events = collections.defaultdict(list)
self.leave_events = collections.defaultdict(list)
self.linter = linter
def _is_method_enabled(self, method):
if not hasattr(method, 'checks_msgs'):
return True
for msg_desc in method.checks_msgs:
if self.linter.is_message_enabled(msg_desc):
return True
return False
def add_checker(self, checker):
"""walk to the checker's dir and collect visit and leave methods"""
# XXX : should be possible to merge needed_checkers and add_checker
vcids = set()
lcids = set()
visits = self.visit_events
leaves = self.leave_events
for member in dir(checker):
cid = member[6:]
if cid == 'default':
continue
if member.startswith('visit_'):
v_meth = getattr(checker, member)
# don't use visit_methods with no activated message:
if self._is_method_enabled(v_meth):
visits[cid].append(v_meth)
vcids.add(cid)
elif member.startswith('leave_'):
l_meth = getattr(checker, member)
# don't use leave_methods with no activated message:
if self._is_method_enabled(l_meth):
leaves[cid].append(l_meth)
lcids.add(cid)
visit_default = getattr(checker, 'visit_default', None)
if visit_default:
for cls in nodes.ALL_NODE_CLASSES:
cid = cls.__name__.lower()
if cid not in vcids:
visits[cid].append(visit_default)
# for now we have no "leave_default" method in Pylint
def walk(self, astroid):
"""call visit events of astroid checkers for the given node, recurse on
its children, then leave events.
"""
cid = astroid.__class__.__name__.lower()
        visit_events = self.visit_events.get(cid, ())
        leave_events = self.leave_events.get(cid, ())
if astroid.is_statement:
self.nbstatements += 1
# generate events for this node on each checker
for cb in visit_events or ():
cb(astroid)
# recurse on children
for child in astroid.get_children():
self.walk(child)
for cb in leave_events or ():
cb(astroid)
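    # Illustrative sketch (editor's addition): checker method names encode the
    # node type, so e.g. a ``visit_functiondef`` method runs for every
    # FunctionDef node. MyChecker and astroid_module are hypothetical.
    #
    #     walker = PyLintASTWalker(linter)
    #     walker.add_checker(MyChecker(linter))  # collects visit_*/leave_*
    #     walker.walk(astroid_module)            # fires events depth-first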
PY_EXTS = ('.py', '.pyc', '.pyo', '.pyw', '.so', '.dll')
def register_plugins(linter, directory):
"""load all module and package in the given directory, looking for a
'register' function in each one, used to register pylint checkers
"""
imported = {}
for filename in os.listdir(directory):
base, extension = splitext(filename)
if base in imported or base == '__pycache__':
continue
        if (extension in PY_EXTS and base != '__init__' or
                (not extension and isdir(join(directory, base)))):
try:
module = modutils.load_module_from_file(join(directory, filename))
except ValueError:
# empty module name (usually emacs auto-save files)
continue
except ImportError as exc:
print("Problem importing module %s: %s" % (filename, exc),
file=sys.stderr)
else:
if hasattr(module, 'register'):
module.register(linter)
imported[base] = 1
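# Illustrative sketch (editor's addition): a plugin only needs a module-level
# ``register`` function; my_plugin.py and MyChecker are hypothetical.
#
#     # my_plugin.py
#     def register(linter):
#         linter.register_checker(MyChecker(linter))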
def get_global_option(checker, option, default=None):
""" Retrieve an option defined by the given *checker* or
by all known option providers.
    It will look in the list of all options providers
    until the given *option* is found.
    If the option isn't found, the *default* value is returned.
"""
# First, try in the given checker's config.
# After that, look in the options providers.
try:
return getattr(checker.config, option.replace("-", "_"))
except AttributeError:
pass
for provider in checker.linter.options_providers:
for options in provider.options:
if options[0] == option:
return getattr(provider.config, option.replace("-", "_"))
return default
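# Illustrative sketch (editor's addition): option names keep their dashed
# command-line form here and are translated to the underscored attribute
# name internally. 'ignored-classes' is assumed to be a registered option.
#
#     ignored = get_global_option(checker, 'ignored-classes', default=())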
def deprecated_option(shortname=None, opt_type=None, help_msg=None, deprecation_msg=None):
def _warn_deprecated(option, optname, *args): # pylint: disable=unused-argument
if deprecation_msg:
sys.stderr.write(deprecation_msg % (optname,))
option = {
'help': help_msg,
'hide': True,
'type': opt_type,
'action': 'callback',
'callback': _warn_deprecated,
'deprecated': True
}
if shortname:
option['shortname'] = shortname
return option
def _splitstrip(string, sep=','):
"""return a list of stripped string by splitting the string given as
argument on `sep` (',' by default). Empty string are discarded.
>>> _splitstrip('a, b, c , 4,,')
['a', 'b', 'c', '4']
>>> _splitstrip('a')
['a']
>>> _splitstrip('a,\nb,\nc,')
['a', 'b', 'c']
:type string: str or unicode
:param string: a csv line
:type sep: str or unicode
:param sep: field separator, default to the comma (',')
    :rtype: list of str or unicode
    :return: the list of stripped, non-empty fields
"""
return [word.strip() for word in string.split(sep) if word.strip()]
def _unquote(string):
"""remove optional quotes (simple or double) from the string
:type string: str or unicode
:param string: an optionally quoted string
:rtype: str or unicode
:return: the unquoted string (or the input string if it wasn't quoted)
"""
if not string:
return string
if string[0] in '"\'':
string = string[1:]
if string[-1] in '"\'':
string = string[:-1]
return string
def _normalize_text(text, line_len=80, indent=''):
"""Wrap the text on the given line length."""
return '\n'.join(textwrap.wrap(text, width=line_len, initial_indent=indent,
subsequent_indent=indent))
def _check_csv(value):
if isinstance(value, (list, tuple)):
return value
return _splitstrip(value)
if six.PY2:
def _encode(string, encoding):
# pylint: disable=undefined-variable
if isinstance(string, unicode):
return string.encode(encoding)
return str(string)
else:
def _encode(string, _):
return str(string)
def _get_encoding(encoding, stream):
encoding = encoding or getattr(stream, 'encoding', None)
if not encoding:
import locale
encoding = locale.getpreferredencoding()
return encoding
def _comment(string):
"""return string as a comment"""
lines = [line.strip() for line in string.splitlines()]
return '# ' + ('%s# ' % os.linesep).join(lines)
def _format_option_value(optdict, value):
"""return the user input's value from a 'compiled' value"""
if isinstance(value, (list, tuple)):
value = ','.join(_format_option_value(optdict, item) for item in value)
elif isinstance(value, dict):
value = ','.join('%s:%s' % (k, v) for k, v in value.items())
elif hasattr(value, 'match'): # optdict.get('type') == 'regexp'
# compiled regexp
value = value.pattern
elif optdict.get('type') == 'yn':
value = 'yes' if value else 'no'
elif isinstance(value, six.string_types) and value.isspace():
value = "'%s'" % value
return value
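# Illustrative sketch (editor's addition): round-tripping a few compiled
# values back to their user-input form.
#
#     _format_option_value({}, ['a', 'b'])          # -> 'a,b'
#     _format_option_value({'type': 'yn'}, True)    # -> 'yes'
#     _format_option_value({}, re.compile(r'\d+'))  # -> '\\d+'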
def _ini_format_section(stream, section, options, encoding=None, doc=None):
"""format an options section using the INI format"""
encoding = _get_encoding(encoding, stream)
if doc:
print(_encode(_comment(doc), encoding), file=stream)
print('[%s]' % section, file=stream)
_ini_format(stream, options, encoding)
def _ini_format(stream, options, encoding):
"""format options using the INI format"""
for optname, optdict, value in options:
value = _format_option_value(optdict, value)
help = optdict.get('help')
if help:
help = _normalize_text(help, line_len=79, indent='# ')
print(file=stream)
print(_encode(help, encoding), file=stream)
else:
print(file=stream)
if value is None:
print('#%s=' % optname, file=stream)
else:
value = _encode(value, encoding).strip()
if re.match(r'^([\w-]+,)+[\w-]+$', str(value)):
separator = '\n ' + ' ' * len(optname)
value = separator.join(
x + ',' for x in str(value).split(','))
# remove trailing ',' from last element of the list
value = value[:-1]
print('%s=%s' % (optname, value), file=stream)
format_section = _ini_format_section
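# Illustrative sketch (editor's addition): emitting one section in INI form;
# the section name and the option triple are hypothetical.
#
#     _ini_format_section(sys.stdout, 'MASTER',
#                         [('jobs', {'help': 'Number of processes'}, 1)])
#     # [MASTER]
#     #
#     # # Number of processes
#     # jobs=1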
def _rest_format_section(stream, section, options, encoding=None, doc=None):
"""format an options section using as ReST formatted output"""
encoding = _get_encoding(encoding, stream)
if section:
print('%s\n%s' % (section, "'"*len(section)), file=stream)
if doc:
print(_encode(_normalize_text(doc, line_len=79, indent=''), encoding), file=stream)
print(file=stream)
for optname, optdict, value in options:
help = optdict.get('help')
print(':%s:' % optname, file=stream)
if help:
help = _normalize_text(help, line_len=79, indent=' ')
print(_encode(help, encoding), file=stream)
if value:
value = _encode(_format_option_value(optdict, value), encoding)
print(file=stream)
print(' Default: ``%s``' % value.replace("`` ", "```` ``"), file=stream)
|
{
"content_hash": "458f2e1f940b140571b073ac4d5fe2a3",
"timestamp": "",
"source": "github",
"line_count": 1198,
"max_line_length": 100,
"avg_line_length": 38.502504173622704,
"alnum_prop": 0.573754498547457,
"repo_name": "AtomLinter/linter-pylama",
"id": "9c078eba83ce005927988e2c344cf59da08b99da",
"size": "47819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/deps/pylint/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "22985"
},
{
"name": "Python",
"bytes": "2871797"
}
],
"symlink_target": ""
}
|
import copy
import os
import sys
import textwrap
from alembic.autogenerate import api as alembic_ag_api
from alembic import config as alembic_config
from alembic.operations import ops as alembic_ops
from alembic import script as alembic_script
import fixtures
import mock
from oslo_utils import fileutils
import pkg_resources
import sqlalchemy as sa
from neutron.db import migration
from neutron.db.migration import autogen
from neutron.db.migration import cli
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit import testlib_api
class FakeConfig(object):
service = ''
class FakeRevision(object):
path = 'fakepath'
def __init__(self, labels=None, down_revision=None, is_branch_point=False):
if not labels:
labels = set()
self.branch_labels = labels
self.down_revision = down_revision
self.is_branch_point = is_branch_point
self.revision = tools.get_random_string()
self.module = mock.MagicMock()
class MigrationEntrypointsMemento(fixtures.Fixture):
'''Create a copy of the migration entrypoints map so it can be restored
during test cleanup.
'''
def _setUp(self):
self.ep_backup = {}
for proj, ep in cli.migration_entrypoints.items():
self.ep_backup[proj] = copy.copy(ep)
self.addCleanup(self.restore)
def restore(self):
cli.migration_entrypoints = self.ep_backup
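    # Illustrative sketch (editor's addition): tests install the fixture
    # before mutating the entrypoints map, and cleanup puts the original
    # back; 'my-project' and my_entrypoint are hypothetical.
    #
    #     self.useFixture(MigrationEntrypointsMemento())
    #     cli.migration_entrypoints['my-project'] = my_entrypoint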
class TestDbMigration(base.BaseTestCase):
def setUp(self):
super(TestDbMigration, self).setUp()
mock.patch('alembic.op.get_bind').start()
        self.mock_alembic_is_offline = mock.patch(
            'alembic.context.is_offline_mode', return_value=False).start()
self.mock_sa_inspector = mock.patch(
'sqlalchemy.engine.reflection.Inspector').start()
def _prepare_mocked_sqlalchemy_inspector(self):
mock_inspector = mock.MagicMock()
mock_inspector.get_table_names.return_value = ['foo', 'bar']
mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
{'name': 'bar_column'}]
self.mock_sa_inspector.from_engine.return_value = mock_inspector
def test_schema_has_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_table('foo'))
def test_schema_has_table_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')
def test_schema_has_column_missing_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column('meh', 'meh'))
def test_schema_has_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_column('foo', 'foo_column'))
def test_schema_has_column_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_column,
'foo', 'foo_col')
def test_schema_has_column_missing_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column(
'foo', column_name='meh'))
class TestCli(base.BaseTestCase):
def setUp(self):
super(TestCli, self).setUp()
self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
self.do_alembic_cmd = self.do_alembic_cmd_p.start()
self.mock_alembic_err = mock.patch('alembic.util.err').start()
self.mock_alembic_warn = mock.patch('alembic.util.warn').start()
self.mock_alembic_err.side_effect = SystemExit
def mocked_root_dir(cfg):
return os.path.join('/fake/dir', cli._get_project_base(cfg))
mock_root = mock.patch.object(cli, '_get_package_root_dir').start()
mock_root.side_effect = mocked_root_dir
# Avoid creating fake directories
mock.patch('oslo_utils.fileutils.ensure_tree').start()
# Set up some configs and entrypoints for tests to chew on
self.configs = []
self.projects = ('neutron', 'networking-foo', 'neutron-fwaas')
ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini')
self.useFixture(MigrationEntrypointsMemento())
cli.migration_entrypoints = {}
for project in self.projects:
config = alembic_config.Config(ini)
config.set_main_option('neutron_project', project)
module_name = project.replace('-', '_') + '.db.migration'
attrs = ('alembic_migrations',)
script_location = ':'.join([module_name, attrs[0]])
config.set_main_option('script_location', script_location)
self.configs.append(config)
entrypoint = pkg_resources.EntryPoint(project,
module_name,
attrs=attrs)
cli.migration_entrypoints[project] = entrypoint
def _main_test_helper(self, argv, func_name, exp_kwargs=[{}]):
with mock.patch.object(sys, 'argv', argv),\
mock.patch.object(cli, 'run_sanity_checks'),\
mock.patch.object(cli, 'validate_revisions'):
cli.main()
def _append_version_path(args):
args = copy.copy(args)
if 'autogenerate' in args and not args['autogenerate']:
args['version_path'] = mock.ANY
return args
self.do_alembic_cmd.assert_has_calls(
[mock.call(mock.ANY, func_name, **_append_version_path(kwargs))
for kwargs in exp_kwargs]
)
def test_stamp(self):
self._main_test_helper(
['prog', 'stamp', 'foo'],
'stamp',
[{'revision': 'foo', 'sql': False}]
)
self._main_test_helper(
['prog', 'stamp', 'foo', '--sql'],
'stamp',
[{'revision': 'foo', 'sql': True}]
)
def _validate_cmd(self, cmd):
self._main_test_helper(
['prog', cmd],
cmd,
[{'verbose': False}])
self._main_test_helper(
['prog', cmd, '--verbose'],
cmd,
[{'verbose': True}])
def test_branches(self):
self._validate_cmd('branches')
def test_current(self):
self._validate_cmd('current')
def test_history(self):
self._validate_cmd('history')
def test_heads(self):
self._validate_cmd('heads')
def test_check_migration(self):
with mock.patch.object(cli, 'validate_head_files') as validate:
self._main_test_helper(['prog', 'check_migration'], 'branches')
self.assertEqual(len(self.projects), validate.call_count)
def _test_database_sync_revision(self, separate_branches=True):
with mock.patch.object(cli, 'update_head_files') as update:
if separate_branches:
mock.patch('os.path.exists').start()
expected_kwargs = [{
'message': 'message', 'sql': False, 'autogenerate': True,
}]
self._main_test_helper(
['prog', 'revision', '--autogenerate', '-m', 'message'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
expected_kwargs = [{
'message': 'message',
'sql': True,
'autogenerate': False,
'head': cli._get_branch_head(branch)
} for branch in cli.MIGRATION_BRANCHES]
self._main_test_helper(
['prog', 'revision', '--sql', '-m', 'message'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
expected_kwargs = [{
'message': 'message',
'sql': False,
'autogenerate': False,
'head': 'expand@head'
}]
self._main_test_helper(
['prog', 'revision', '-m', 'message', '--expand'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
for kwarg in expected_kwargs:
kwarg['head'] = 'contract@head'
self._main_test_helper(
['prog', 'revision', '-m', 'message', '--contract'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
def test_database_sync_revision(self):
self._test_database_sync_revision()
def test_database_sync_revision_no_branches(self):
# Test that old branchless approach is still supported
self._test_database_sync_revision(separate_branches=False)
def test_upgrade_revision(self):
self._main_test_helper(
['prog', 'upgrade', '--sql', 'head'],
'upgrade',
[{'desc': None, 'revision': 'heads', 'sql': True}]
)
def test_upgrade_delta(self):
self._main_test_helper(
['prog', 'upgrade', '--delta', '3'],
'upgrade',
[{'desc': None, 'revision': '+3', 'sql': False}]
)
def test_upgrade_revision_delta(self):
self._main_test_helper(
['prog', 'upgrade', 'kilo', '--delta', '3'],
'upgrade',
[{'desc': None, 'revision': 'kilo+3', 'sql': False}]
)
def test_upgrade_expand(self):
self._main_test_helper(
['prog', 'upgrade', '--expand'],
'upgrade',
[{'desc': cli.EXPAND_BRANCH,
'revision': 'expand@head',
'sql': False}]
)
def test_upgrade_expand_contract_are_mutually_exclusive(self):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--expand --contract'], 'upgrade')
def _test_upgrade_conflicts_with_revision(self, mode):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--%s revision1' % mode], 'upgrade')
def _test_upgrade_conflicts_with_delta(self, mode):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--%s +3' % mode], 'upgrade')
def _test_revision_autogenerate_conflicts_with_branch(self, branch):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'revision', '--autogenerate', '--%s' % branch],
'revision')
def test_revision_autogenerate_conflicts_with_expand(self):
self._test_revision_autogenerate_conflicts_with_branch(
cli.EXPAND_BRANCH)
def test_revision_autogenerate_conflicts_with_contract(self):
self._test_revision_autogenerate_conflicts_with_branch(
cli.CONTRACT_BRANCH)
def test_upgrade_expand_conflicts_with_revision(self):
self._test_upgrade_conflicts_with_revision('expand')
def test_upgrade_contract_conflicts_with_revision(self):
self._test_upgrade_conflicts_with_revision('contract')
def test_upgrade_expand_conflicts_with_delta(self):
self._test_upgrade_conflicts_with_delta('expand')
def test_upgrade_contract_conflicts_with_delta(self):
self._test_upgrade_conflicts_with_delta('contract')
def test_upgrade_contract(self):
self._main_test_helper(
['prog', 'upgrade', '--contract'],
'upgrade',
[{'desc': cli.CONTRACT_BRANCH,
'revision': 'contract@head',
'sql': False}]
)
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_upgrade_milestone_expand_before_contract(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
e_revs[3].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs + e_revs
self._main_test_helper(
['prog', '--subproject', 'neutron', 'upgrade', 'liberty'],
'upgrade',
[{'desc': cli.EXPAND_BRANCH,
'revision': e_revs[3].revision,
'sql': False},
{'desc': cli.CONTRACT_BRANCH,
'revision': c_revs[1].revision,
'sql': False}]
)
def assert_command_fails(self, command):
# Avoid cluttering stdout with argparse error messages
mock.patch('argparse.ArgumentParser._print_message').start()
with mock.patch.object(sys, 'argv', command), mock.patch.object(
cli, 'run_sanity_checks'):
self.assertRaises(SystemExit, cli.main)
def test_downgrade_fails(self):
self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno'])
def test_upgrade_negative_relative_revision_fails(self):
self.assert_command_fails(['prog', 'upgrade', '-2'])
def test_upgrade_negative_delta_fails(self):
self.assert_command_fails(['prog', 'upgrade', '--delta', '-2'])
def test_upgrade_rejects_delta_with_relative_revision(self):
self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3'])
def _test_validate_head_files_helper(self, heads, contract_head='',
expand_head=''):
fake_config = self.configs[0]
head_files_not_exist = (contract_head == expand_head == '')
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\
mock.patch('os.path.exists') as os_mock:
if head_files_not_exist:
os_mock.return_value = False
else:
os_mock.return_value = True
fc.return_value.get_heads.return_value = heads
revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
fc.return_value.get_revision.side_effect = revs.__getitem__
mock_open_con = self.useFixture(
tools.OpenFixture(cli._get_contract_head_file_path(
fake_config), contract_head + '\n')).mock_open
mock_open_ex = self.useFixture(
tools.OpenFixture(cli._get_expand_head_file_path(
fake_config), expand_head + '\n')).mock_open
if contract_head in heads and expand_head in heads:
cli.validate_head_files(fake_config)
elif head_files_not_exist:
cli.validate_head_files(fake_config)
self.assertTrue(self.mock_alembic_warn.called)
else:
self.assertRaises(
SystemExit,
cli.validate_head_files,
fake_config
)
self.assertTrue(self.mock_alembic_err.called)
if contract_head in heads and expand_head in heads:
mock_open_ex.assert_called_with(
cli._get_expand_head_file_path(fake_config))
mock_open_con.assert_called_with(
cli._get_contract_head_file_path(fake_config))
if not head_files_not_exist:
fc.assert_called_once_with(fake_config)
def test_validate_head_files_success(self):
self._test_validate_head_files_helper(['a', 'b'], contract_head='a',
expand_head='b')
def test_validate_head_files_missing_file(self):
self._test_validate_head_files_helper(['a', 'b'])
def test_validate_head_files_wrong_contents(self):
self._test_validate_head_files_helper(['a', 'b'], contract_head='c',
expand_head='d')
@mock.patch.object(fileutils, 'delete_if_exists')
def test_update_head_files_success(self, *mocks):
heads = ['a', 'b']
mock_open_con = self.useFixture(
tools.OpenFixture(cli._get_contract_head_file_path(
self.configs[0]))).mock_open
mock_open_ex = self.useFixture(
tools.OpenFixture(cli._get_expand_head_file_path(
self.configs[0]))).mock_open
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
fc.return_value.get_heads.return_value = heads
revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
fc.return_value.get_revision.side_effect = revs.__getitem__
cli.update_head_files(self.configs[0])
mock_open_con.return_value.write.assert_called_with(
heads[0] + '\n')
mock_open_ex.return_value.write.assert_called_with(heads[1] + '\n')
old_head_file = cli._get_head_file_path(
self.configs[0])
old_heads_file = cli._get_heads_file_path(
self.configs[0])
delete_if_exists = mocks[0]
self.assertIn(mock.call(old_head_file),
delete_if_exists.call_args_list)
self.assertIn(mock.call(old_heads_file),
delete_if_exists.call_args_list)
def test_get_project_base(self):
config = alembic_config.Config()
config.set_main_option('script_location', 'a.b.c:d')
proj_base = cli._get_project_base(config)
self.assertEqual('a', proj_base)
def test_get_root_versions_dir(self):
config = alembic_config.Config()
config.set_main_option('script_location', 'a.b.c:d')
versions_dir = cli._get_root_versions_dir(config)
self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir)
def test_get_subproject_script_location(self):
foo_ep = cli._get_subproject_script_location('networking-foo')
expected = 'networking_foo.db.migration:alembic_migrations'
self.assertEqual(expected, foo_ep)
def test_get_subproject_script_location_not_installed(self):
self.assertRaises(
SystemExit, cli._get_subproject_script_location, 'not-installed')
def test_get_subproject_base_not_installed(self):
self.assertRaises(
SystemExit, cli._get_subproject_base, 'not-installed')
def test__compare_labels_ok(self):
labels = {'label1', 'label2'}
fake_revision = FakeRevision(labels)
cli._compare_labels(fake_revision, {'label1', 'label2'})
def test__compare_labels_fail_unexpected_labels(self):
labels = {'label1', 'label2', 'label3'}
fake_revision = FakeRevision(labels)
self.assertRaises(
SystemExit,
cli._compare_labels, fake_revision, {'label1', 'label2'})
@mock.patch.object(cli, '_compare_labels')
def test__validate_single_revision_labels_branchless_fail_different_labels(
self, compare_mock):
fake_down_revision = FakeRevision()
fake_revision = FakeRevision(down_revision=fake_down_revision)
script_dir = mock.Mock()
script_dir.get_revision.return_value = fake_down_revision
cli._validate_single_revision_labels(script_dir, fake_revision,
label=None)
expected_labels = set()
compare_mock.assert_has_calls(
[mock.call(revision, expected_labels)
for revision in (fake_revision, fake_down_revision)]
)
@mock.patch.object(cli, '_compare_labels')
def test__validate_single_revision_labels_branches_fail_different_labels(
self, compare_mock):
fake_down_revision = FakeRevision()
fake_revision = FakeRevision(down_revision=fake_down_revision)
script_dir = mock.Mock()
script_dir.get_revision.return_value = fake_down_revision
cli._validate_single_revision_labels(
script_dir, fake_revision, label='fakebranch')
expected_labels = {'fakebranch'}
compare_mock.assert_has_calls(
[mock.call(revision, expected_labels)
for revision in (fake_revision, fake_down_revision)]
)
@mock.patch.object(cli, '_validate_single_revision_labels')
def test__validate_revision_validates_branches(self, validate_mock):
script_dir = mock.Mock()
fake_revision = FakeRevision()
branch = cli.MIGRATION_BRANCHES[0]
fake_revision.path = os.path.join('/fake/path', branch)
cli._validate_revision(script_dir, fake_revision)
validate_mock.assert_called_with(
script_dir, fake_revision, label=branch)
@mock.patch.object(cli, '_validate_single_revision_labels')
def test__validate_revision_validates_branchless_migrations(
self, validate_mock):
script_dir = mock.Mock()
fake_revision = FakeRevision()
cli._validate_revision(script_dir, fake_revision)
validate_mock.assert_called_with(script_dir, fake_revision)
@mock.patch.object(cli, '_validate_revision')
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_validate_revisions_walks_thru_all_revisions(
self, walk_mock, validate_mock):
revisions = [FakeRevision() for i in range(10)]
walk_mock.return_value = revisions
cli.validate_revisions(self.configs[0])
validate_mock.assert_has_calls(
[mock.call(mock.ANY, revision) for revision in revisions]
)
@mock.patch.object(cli, '_validate_revision')
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_validate_revisions_fails_on_multiple_branch_points(
self, walk_mock, validate_mock):
revisions = [FakeRevision(is_branch_point=True) for i in range(2)]
walk_mock.return_value = revisions
self.assertRaises(
SystemExit, cli.validate_revisions, self.configs[0])
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__get_branch_points(self, walk_mock):
        revisions = [FakeRevision(is_branch_point=tools.get_random_boolean())
                     for i in range(50)]
walk_mock.return_value = revisions
script_dir = alembic_script.ScriptDirectory.from_config(
self.configs[0])
self.assertEqual(set(rev for rev in revisions if rev.is_branch_point),
set(cli._get_branch_points(script_dir)))
@mock.patch.object(cli, '_get_version_branch_path')
def test_autogen_process_directives(self, get_version_branch_path):
get_version_branch_path.side_effect = lambda cfg, release, branch: (
"/foo/expand" if branch == 'expand' else "/foo/contract")
migration_script = alembic_ops.MigrationScript(
'eced083f5df',
# these directives will be split into separate
# expand/contract scripts
alembic_ops.UpgradeOps(
ops=[
alembic_ops.CreateTableOp(
'organization',
[
sa.Column('id', sa.Integer(), primary_key=True),
sa.Column('name', sa.String(50), nullable=False)
]
),
alembic_ops.ModifyTableOps(
'user',
ops=[
alembic_ops.AddColumnOp(
'user',
sa.Column('organization_id', sa.Integer())
),
alembic_ops.CreateForeignKeyOp(
'org_fk', 'user', 'organization',
['organization_id'], ['id']
),
alembic_ops.DropConstraintOp(
'user', 'uq_user_org'
),
alembic_ops.DropColumnOp(
'user', 'organization_name'
)
]
)
]
),
# these will be discarded
alembic_ops.DowngradeOps(
ops=[
alembic_ops.AddColumnOp(
'user', sa.Column(
'organization_name', sa.String(50), nullable=True)
),
alembic_ops.CreateUniqueConstraintOp(
'uq_user_org', 'user',
['user_name', 'organization_name']
),
alembic_ops.ModifyTableOps(
'user',
ops=[
alembic_ops.DropConstraintOp('org_fk', 'user'),
alembic_ops.DropColumnOp('user', 'organization_id')
]
),
alembic_ops.DropTableOp('organization')
]
),
message='create the organization table and '
'replace user.organization_name'
)
directives = [migration_script]
autogen.process_revision_directives(
mock.Mock(), mock.Mock(), directives
)
expand = directives[0]
contract = directives[1]
self.assertEqual("/foo/expand", expand.version_path)
self.assertEqual("/foo/contract", contract.version_path)
self.assertTrue(expand.downgrade_ops.is_empty())
self.assertTrue(contract.downgrade_ops.is_empty())
self.assertEqual(
textwrap.dedent("""\
### commands auto generated by Alembic - please adjust! ###
op.create_table('organization',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.add_column('user', """
"""sa.Column('organization_id', sa.Integer(), nullable=True))
op.create_foreign_key('org_fk', 'user', """
"""'organization', ['organization_id'], ['id'])
### end Alembic commands ###"""),
alembic_ag_api.render_python_code(expand.upgrade_ops)
)
self.assertEqual(
textwrap.dedent("""\
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('user', 'uq_user_org', type_=None)
op.drop_column('user', 'organization_name')
### end Alembic commands ###"""),
alembic_ag_api.render_python_code(contract.upgrade_ops)
)
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_one_branch(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs
m = cli._find_milestone_revisions(self.configs[0], 'liberty',
cli.CONTRACT_BRANCH)
self.assertEqual(1, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'liberty',
cli.EXPAND_BRANCH)
self.assertEqual(0, len(m))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_two_branches(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
e_revs[3].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs + e_revs
m = cli._find_milestone_revisions(self.configs[0], 'liberty')
self.assertEqual(2, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
self.assertEqual(0, len(m))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_branchless(self, walk_mock):
revisions = [FakeRevision() for r in range(5)]
revisions[2].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = revisions
m = cli._find_milestone_revisions(self.configs[0], 'liberty')
self.assertEqual(1, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
self.assertEqual(0, len(m))
class TestSafetyChecks(base.BaseTestCase):
def test_validate_revisions(self, *mocks):
cli.validate_revisions(cli.get_neutron_config())
|
{
"content_hash": "64df6c4d96ac0b9507f056a9ef22c559",
"timestamp": "",
"source": "github",
"line_count": 729,
"max_line_length": 79,
"avg_line_length": 40.40466392318244,
"alnum_prop": 0.569852317093872,
"repo_name": "cloudbase/neutron",
"id": "fa403b78b0ced6278ce3cad015dc9e5a7e1f1b8e",
"size": "30105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/db/test_migration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9942988"
},
{
"name": "Shell",
"bytes": "14325"
}
],
"symlink_target": ""
}
|
import os
import posixpath
import re
import signal
import sys
import time
import psutil
import pytest
from daemonocle import Daemon, DaemonError
def test_simple(pyscript):
script = pyscript("""
import sys
import time
from daemonocle import Daemon
def worker():
time.sleep(2)
daemon = Daemon(worker=worker, name='foo')
daemon.do_action(sys.argv[1])
""")
result = script.run('start')
assert result.returncode == 0
assert result.stdout == b'Starting foo ... OK\n'
assert result.stderr == b''
result = script.run('status')
assert result.returncode == 1
assert result.stdout == b''
assert (b'DaemonError: Cannot get status of daemon '
b'without PID file') in result.stderr
result = script.run('stop')
assert result.returncode == 1
assert result.stdout == b''
assert b'DaemonError: Cannot stop daemon without PID file' in result.stderr
def test_no_args_or_worker():
daemon = Daemon()
assert daemon.name == posixpath.basename(sys.argv[0])
with pytest.raises(DaemonError):
daemon.do_action('start')
def test_immediate_exit(pyscript):
script = pyscript("""
import sys
from daemonocle import Daemon
def worker():
sys.exit(42)
daemon = Daemon(worker=worker, name='foo')
daemon.do_action('start')
""")
result = script.run()
assert result.returncode == 42
assert result.stdout == b'Starting foo ... FAILED\n'
assert result.stderr == (b'ERROR: Child exited immediately with '
b'exit code 42\n')
def test_non_detached(pyscript):
script = pyscript("""
from daemonocle import Daemon
def worker():
print('hello world')
daemon = Daemon(worker=worker, name='foo', detach=False)
daemon.do_action('start')
""")
result = script.run()
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_non_detached_signal_forwarding_without_pid_file(pyscript):
script = pyscript("""
import time
from daemonocle import Daemon
def worker():
print('hello world')
time.sleep(10)
daemon = Daemon(worker=worker, name='foo', detach=False)
daemon.do_action('start')
""")
script.start()
time.sleep(2)
os.kill(script.process.pid, signal.SIGTERM)
result = script.join()
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'Received signal SIGTERM (15). Forwarding to child...\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_non_detached_signal_forwarding_with_pid_file(pyscript):
script = pyscript("""
import time
from daemonocle import Daemon
def worker():
print('hello world')
time.sleep(10)
daemon = Daemon(worker=worker, name='foo', detach=False,
pid_file='foo.pid')
daemon.do_action('start')
""")
script.start()
time.sleep(2)
os.kill(script.process.pid, signal.SIGTERM)
result = script.join()
assert result.returncode == 0
assert result.stdout == (
b'Starting foo ... OK\n'
b'hello world\n'
b'Received signal SIGTERM (15). Forwarding to child...\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_pidfile(pyscript):
script = pyscript("""
import sys
import time
from daemonocle import Daemon
def worker():
time.sleep(10)
daemon = Daemon(worker=worker, name='foo', pid_file='foo.pid')
daemon.do_action(sys.argv[1])
""")
status_pattern = re.compile(
br'^foo -- pid: (\d+), status: (?:running|sleeping), '
br'uptime: [0-9mhd ]+, %cpu: \d+\.\d, %mem: \d+\.\d\n$')
result = script.run('start')
assert result.returncode == 0
assert result.stdout == b'Starting foo ... OK\n'
assert result.stderr == b''
result = script.run('status')
assert result.returncode == 0
match = status_pattern.match(result.stdout)
assert match
pid1 = int(match.group(1))
assert result.stderr == b''
result = script.run('start')
assert result.returncode == 0
assert result.stdout == b''
assert result.stderr == ('WARNING: foo already running with PID '
'{pid}\n'.format(pid=pid1)).encode('utf-8')
result = script.run('restart')
assert result.returncode == 0
assert result.stdout == b'Stopping foo ... OK\nStarting foo ... OK\n'
assert result.stderr == b''
result = script.run('status')
assert result.returncode == 0
match = status_pattern.match(result.stdout)
assert match
pid2 = int(match.group(1))
assert pid1 != pid2
assert result.stderr == b''
result = script.run('stop')
assert result.returncode == 0
assert result.stdout == b'Stopping foo ... OK\n'
assert result.stderr == b''
result = script.run('status')
assert result.returncode == 1
assert result.stdout == b'foo -- not running\n'
assert result.stderr == b''
result = script.run('stop')
assert result.returncode == 0
assert result.stdout == b''
assert result.stderr == b'WARNING: foo is not running\n'
def test_piddir(pyscript):
script = pyscript("""
import sys
import time
from daemonocle import Daemon
def worker():
time.sleep(10)
daemon = Daemon(worker=worker, name='foo', pid_file='foo/foo.pid')
daemon.do_action(sys.argv[1])
""")
piddir = posixpath.join(script.dirname, 'foo')
script.run('start')
assert posixpath.isdir(piddir)
assert os.listdir(piddir) == ['foo.pid']
script.run('stop')
assert posixpath.isdir(piddir)
assert os.listdir(piddir) == []
def test_broken_pidfile(pyscript):
script = pyscript("""
import sys
import time
from daemonocle import Daemon
def worker():
time.sleep(10)
daemon = Daemon(worker=worker, name='foo', pid_file='foo.pid')
daemon.do_action(sys.argv[1])
""")
pid_file = posixpath.realpath(posixpath.join(script.dirname, 'foo.pid'))
script.run('start')
# Break the PID file
with open(pid_file, 'wb') as f:
f.write(b'banana\n')
result = script.run('status')
assert result.returncode == 1
assert result.stdout == b'foo -- not running\n'
assert result.stderr == ('WARNING: Empty or broken PID file {pid_file}; '
'removing\n').format(
pid_file=pid_file).encode('utf8')
result = script.run('stop')
assert result.returncode == 0
assert result.stdout == b''
assert result.stderr == b'WARNING: foo is not running\n'
def test_stale_pidfile(pyscript):
script = pyscript("""
import sys
import time
from daemonocle import Daemon
def worker():
time.sleep(10)
daemon = Daemon(worker=worker, name='foo', pid_file='foo.pid')
daemon.do_action(sys.argv[1])
""")
pid_file = posixpath.realpath(posixpath.join(script.dirname, 'foo.pid'))
script.run('start')
with open(pid_file, 'rb') as f:
pid = int(f.read())
os.kill(pid, signal.SIGKILL)
result = script.run('status')
assert result.returncode == 1
assert result.stdout == b'foo -- not running\n'
assert result.stderr == b''
assert not posixpath.isfile(pid_file)
result = script.run('stop')
assert result.returncode == 0
assert result.stdout == b''
assert result.stderr == b'WARNING: foo is not running\n'
def test_stdout_and_stderr_file(pyscript):
script = pyscript("""
import sys
import time
from daemonocle import Daemon
def worker():
sys.stdout.write('1ohhyMgprGBsSgPF7R388fs1VYtF3UyxCzp\\n')
sys.stdout.flush()
sys.stderr.write('1PMQcUFXReMo8V4jRK8sRkixpGm6TVb1KJJ\\n')
sys.stderr.flush()
time.sleep(10)
daemon = Daemon(worker=worker, name='foo', pid_file='foo.pid',
stdout_file='stdout.log', stderr_file='stderr.log')
daemon.do_action(sys.argv[1])
""")
pid_file = posixpath.realpath(posixpath.join(script.dirname, 'foo.pid'))
result = script.run('start')
try:
assert result.returncode == 0
assert result.stdout == b'Starting foo ... OK\n'
assert result.stderr == b''
with open(pid_file, 'rb') as f:
proc = psutil.Process(int(f.read()))
assert proc.status() in {psutil.STATUS_RUNNING, psutil.STATUS_SLEEPING}
with open(posixpath.join(script.dirname, 'stdout.log'), 'rb') as f:
assert f.read() == b'1ohhyMgprGBsSgPF7R388fs1VYtF3UyxCzp\n'
with open(posixpath.join(script.dirname, 'stderr.log'), 'rb') as f:
assert f.read() == b'1PMQcUFXReMo8V4jRK8sRkixpGm6TVb1KJJ\n'
finally:
result = script.run('stop')
assert result.returncode == 0
assert result.stdout == b'Stopping foo ... OK\n'
assert result.stderr == b''
def test_stdout_and_stderr_file_same_path(pyscript):
script = pyscript("""
import sys
import time
from daemonocle import Daemon
def worker():
sys.stdout.write('1XPRq1KToN6Wz1y1PeR2dj8BNrnjiPTPaup\\n')
sys.stdout.flush()
sys.stderr.write('29qM7pLGqgwwhGAVrWxnce14AsQicSWHnwE\\n')
sys.stderr.flush()
time.sleep(10)
daemon = Daemon(worker=worker, name='foo', pid_file='foo.pid',
stdout_file='output.log', stderr_file='output.log')
daemon.do_action(sys.argv[1])
""")
pid_file = posixpath.realpath(posixpath.join(script.dirname, 'foo.pid'))
result = script.run('start')
try:
assert result.returncode == 0
assert result.stdout == b'Starting foo ... OK\n'
assert result.stderr == b''
with open(pid_file, 'rb') as f:
proc = psutil.Process(int(f.read()))
assert proc.status() in {psutil.STATUS_RUNNING, psutil.STATUS_SLEEPING}
with open(posixpath.join(script.dirname, 'output.log'), 'rb') as f:
assert f.read() == (
b'1XPRq1KToN6Wz1y1PeR2dj8BNrnjiPTPaup\n'
b'29qM7pLGqgwwhGAVrWxnce14AsQicSWHnwE\n')
finally:
result = script.run('stop')
assert result.returncode == 0
assert result.stdout == b'Stopping foo ... OK\n'
assert result.stderr == b''
def test_status_uptime(pyscript):
script = pyscript("""
import sys
import time
from daemonocle import Daemon
def worker():
time.sleep(10)
daemon = Daemon(worker=worker, name='foo', pid_file='foo.pid')
now = time.time()
if sys.argv[1] == 'status':
time.time = lambda: now + int(sys.argv[2])
daemon.do_action(sys.argv[1])
""")
status_pattern = re.compile(
br'^foo -- pid: \d+, status: (?:running|sleeping), '
br'uptime: ([0-9mhd ]+), %cpu: \d+\.\d, %mem: \d+\.\d\n$')
script.run('start')
result = script.run('status', '0')
match = status_pattern.match(result.stdout)
assert match
assert match.group(1) == b'0m'
result = script.run('status', '1000')
match = status_pattern.match(result.stdout)
assert match
assert match.group(1) == b'17m'
result = script.run('status', '10000')
match = status_pattern.match(result.stdout)
assert match
assert match.group(1) == b'2h 47m'
result = script.run('status', '100000')
match = status_pattern.match(result.stdout)
assert match
assert match.group(1) == b'1d 3h 47m'
result = script.run('status', '1000000')
match = status_pattern.match(result.stdout)
assert match
assert match.group(1) == b'11d 13h 47m'
def test_self_reload(pyscript):
script = pyscript("""
import os
from daemonocle import Daemon
daemon = Daemon(name='foo', pid_file='foo.pid', detach=False)
def worker():
print('here is my pid: {}'.format(os.getpid()))
if not os.environ.get('DAEMONOCLE_RELOAD'):
daemon.reload()
daemon.worker = worker
daemon.do_action('start')
""")
result = script.run()
assert result.returncode == 0
match = re.match((
br'^Starting foo \.\.\. OK\n'
br'here is my pid: (\d+)\n'
br'Reloading foo \.\.\. OK\n'
br'here is my pid: (\d+)\n'
br'All children are gone\. Parent is exiting\.\.\.\n$'),
result.stdout)
assert match
assert match.group(1) != match.group(2)
assert result.stderr == b''
daemon = Daemon()
with pytest.raises(DaemonError):
# Don't allow calling reload like this
daemon.reload()
def test_subclass(pyscript):
script = pyscript("""
import daemonocle
class MyDaemon(daemonocle.Daemon):
name = '1jizQzV9STeyLTDgL3kiESxnMMRtk9HvGJE'
def __init__(self):
super(MyDaemon, self).__init__(detach=False)
def worker(self):
print('I am {name}'.format(name=self.name))
print('also 1ZX5KG8RWZwewPFSgkWhtQiuWfAGTobEtFM')
if __name__ == '__main__':
MyDaemon().do_action('start')
""")
result = script.run()
assert result.returncode == 0
assert result.stdout == (
b'Starting 1jizQzV9STeyLTDgL3kiESxnMMRtk9HvGJE ... OK\n'
b'I am 1jizQzV9STeyLTDgL3kiESxnMMRtk9HvGJE\n'
b'also 1ZX5KG8RWZwewPFSgkWhtQiuWfAGTobEtFM\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
def test_start_hook(pyscript):
script = pyscript("""
from daemonocle import Daemon
def start_hook(debug):
print('debug={!r}'.format(debug))
print('2NJSuZFwJcgHYGWup4xHzFR8MtdTUE3johy')
def main():
print('1ZCW56TawPekaVmeQ1GwEg8BgrpPhsvp41s')
if __name__ == '__main__':
Daemon(name='foo', worker=main, hooks={'start': start_hook}).cli()
""")
result = script.run('start', '--debug')
assert result.returncode == 0
assert result.stdout == (
b'debug=True\n'
b'2NJSuZFwJcgHYGWup4xHzFR8MtdTUE3johy\n'
b'Starting foo ... OK\n'
b'1ZCW56TawPekaVmeQ1GwEg8BgrpPhsvp41s\n'
b'All children are gone. Parent is exiting...\n')
assert result.stderr == b''
|
{
"content_hash": "f88f375041f3a427b44cfbc322148371",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 79,
"avg_line_length": 29.336614173228348,
"alnum_prop": 0.5918271488961954,
"repo_name": "jnrbsn/daemonocle",
"id": "e6a4a011f013b33678e275d68db756446c2b65b5",
"size": "14903",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_basics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25478"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.forms import *
from django.contrib.auth import login, logout, get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response as render
from django.template import RequestContext, loader, Context
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext as _
from django.utils.http import urlquote_plus
from django.core.mail import send_mail
from openid.consumer.consumer import Consumer, \
SUCCESS, CANCEL, FAILURE, SETUP_NEEDED
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import sreg, ax
# needed for some linux distributions like debian
try:
from openid.yadis import xri
except ImportError:
from yadis import xri
import re
import urllib
from django_authopenid import DjangoOpenIDStore
from django_authopenid.forms import *
from django_authopenid.models import UserAssociation
from django_authopenid.signals import oid_register
from django_authopenid.utils import *
try:
from registration.forms import RegistrationForm
except ImportError:
from django.forms import Form as RegistrationForm
def _build_context(request, extra_context=None):
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
        context[key] = value() if callable(value) else value
return context
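# Illustrative sketch (editor's addition): callables in extra_context are
# invoked when the context is built, so values can stay lazy. The keys below
# are hypothetical.
#
#     import datetime
#     ctx = _build_context(request, {'now': datetime.datetime.now,
#                                    'title': 'Sign in'})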
def ask_openid(request, openid_url, redirect_to, on_failure=None):
""" basic function to ask openid and return response """
on_failure = on_failure or signin_failure
sreg_req = None
ax_req = None
trust_root = getattr(
settings, 'OPENID_TRUST_ROOT', get_url_host(request) + '/'
)
if xri.identifierScheme(openid_url) == 'XRI' and getattr(
settings, 'OPENID_DISALLOW_INAMES', False
):
msg = _("i-names are not supported")
return on_failure(request, msg)
consumer = Consumer(request.session, DjangoOpenIDStore())
try:
auth_request = consumer.begin(openid_url)
except DiscoveryFailure:
msg = _("The OpenID %s was invalid") % openid_url
return on_failure(request, msg)
# get capabilities
use_ax, use_sreg = discover_extensions(openid_url)
if use_sreg:
# set sreg extension
# we always ask for nickname and email
sreg_attrs = getattr(settings, 'OPENID_SREG', {})
sreg_attrs.update({ "optional": ['nickname', 'email'] })
sreg_req = sreg.SRegRequest(**sreg_attrs)
if use_ax:
# set ax extension
# we always ask for nickname and email
ax_req = ax.FetchRequest()
ax_req.add(ax.AttrInfo('http://schema.openid.net/contact/email',
alias='email', required=True))
ax_req.add(ax.AttrInfo('http://schema.openid.net/namePerson/friendly',
alias='nickname', required=True))
# add custom ax attrs
ax_attrs = getattr(settings, 'OPENID_AX', [])
for attr in ax_attrs:
            if len(attr) == 2:
                ax_req.add(ax.AttrInfo(attr[0], required=attr[1]))
else:
ax_req.add(ax.AttrInfo(attr[0]))
if sreg_req is not None:
auth_request.addExtension(sreg_req)
if ax_req is not None:
auth_request.addExtension(ax_req)
redirect_url = auth_request.redirectURL(trust_root, redirect_to)
return HttpResponseRedirect(redirect_url)
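# Illustrative sketch (editor's addition): a caller builds an absolute return
# URL and delegates the redirect; the completion path below is hypothetical.
#
#     return ask_openid(request, form.cleaned_data['openid_url'],
#                       get_url_host(request) + '/account/signin/complete/',
#                       on_failure=signin_failure)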
def complete(request, on_success=None, on_failure=None, return_to=None,
**kwargs):
""" complete openid signin """
on_success = on_success or default_on_success
on_failure = on_failure or default_on_failure
consumer = Consumer(request.session, DjangoOpenIDStore())
# make sure params are encoded in utf8
    params = dict((k, smart_unicode(v)) for k, v in request.GET.items())
openid_response = consumer.complete(params, return_to)
if openid_response.status == SUCCESS:
return on_success(request, openid_response.identity_url,
openid_response, **kwargs)
elif openid_response.status == CANCEL:
return on_failure(request, 'The request was canceled', **kwargs)
elif openid_response.status == FAILURE:
return on_failure(request, openid_response.message, **kwargs)
elif openid_response.status == SETUP_NEEDED:
return on_failure(request, 'Setup needed', **kwargs)
else:
assert False, "Bad openid status: %s" % openid_response.status
def default_on_success(request, identity_url, openid_response, **kwargs):
""" default action on openid signin success """
request.session['openid'] = from_openid_response(openid_response)
return HttpResponseRedirect(clean_next(request.GET.get('next')))
def default_on_failure(request, message, **kwargs):
""" default failure action on signin """
return render('openid_failure.html', {
'message': message
})
def not_authenticated(func):
""" decorator that redirect user to next page if
he is already logged."""
def decorated(request, *args, **kwargs):
if request.user.is_authenticated():
next = request.GET.get("next", settings.LOGIN_REDIRECT_URL)
return HttpResponseRedirect(next)
return func(request, *args, **kwargs)
return decorated
def signin_success(request, identity_url, openid_response,
redirect_field_name=REDIRECT_FIELD_NAME, **kwargs):
"""
openid signin success.
    If the openid is already registered, the user is redirected to the url
    set by next or, in settings, by the OPENID_REDIRECT_NEXT variable.
    If none of these urls are set, the user is redirected to /.
    If the openid isn't registered, the user is redirected to the register
    page.
"""
openid_ = from_openid_response(openid_response)
openids = request.session.get('openids', [])
openids.append(openid_)
request.session['openids'] = openids
request.session['openid'] = openid_
try:
rel = UserAssociation.objects.get(openid_url__exact = str(openid_))
    except UserAssociation.DoesNotExist:
# try to register this new user
redirect_to = request.REQUEST.get(redirect_field_name, '')
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(
"%s?%s" % (reverse('user_register'),
urllib.urlencode({ redirect_field_name: redirect_to }))
)
user_ = rel.user
if user_.is_active:
user_.backend = "django.contrib.auth.backends.ModelBackend"
login(request, user_)
redirect_to = request.GET.get(redirect_field_name, '')
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(redirect_to)
def signin_failure(request, message, template_name='authopenid/signin.html',
redirect_field_name=REDIRECT_FIELD_NAME, openid_form=OpenidSigninForm,
auth_form=AuthenticationForm, extra_context=None, **kwargs):
"""
failure with openid signin. Go back to the signin page.
:attr request: request object
:attr template_name: string, name of template to use, default is
'authopenid/signin.html'
:attr redirect_field_name: string, field name used for redirect. by default
'next'
:attr openid_form: form used for openid signin, by default `OpenidSigninForm`
:attr auth_form: form object used for legacy authentication,
by default `AuthenticationForm` from django.contrib.auth.
:attr extra_context: A dictionary of variables to add to the template
context. Any callable object in this dictionary will be called to produce
the end result which appears in the context.
"""
return render(template_name, {
'msg': message,
'form1': openid_form(),
'form2': auth_form(),
redirect_field_name: request.REQUEST.get(redirect_field_name, '')
}, context_instance=_build_context(request, extra_context))
@not_authenticated
def signin(request, template_name='authopenid/signin.html',
redirect_field_name=REDIRECT_FIELD_NAME, openid_form=OpenidSigninForm,
auth_form=AuthenticationForm, register_form=RegistrationForm, backend=None,
on_failure=None, extra_context=None):
"""Signin page. It manage the legacy authentification (user/password)
and authentification with openid.
:attr request: request object
:attr template_name: string, name of template to use
:attr redirect_field_name: string, field name used for redirect. by
default 'next'
:attr openid_form: form used for openid signin, by default
`OpenidSigninForm`
:attr auth_form: form object used for legacy authentication,
by default `AuthenticationForm` from django.contrib.auth.
:attr extra_context: A dictionary of variables to add to the
template context. Any callable object in this dictionary will
be called to produce the end result which appears in the context.
"""
if on_failure is None:
on_failure = signin_failure
redirect_to = request.REQUEST.get(redirect_field_name, '')
form1 = openid_form()
form2 = auth_form()
form3 = register_form()
if request.POST:
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
if 'openid_url' in request.POST.keys():
form1 = openid_form(data=request.POST)
if form1.is_valid():
redirect_url = "%s%s?%s" % (
get_url_host(request),
reverse('user_complete_signin'),
urllib.urlencode({ redirect_field_name: redirect_to })
)
return ask_openid(request,
form1.cleaned_data['openid_url'],
redirect_url,
on_failure=on_failure)
elif 'email' in request.POST.keys():
form3 = register_form(data=request.POST)
if form3.is_valid():
# if `registration` application is installed => redirect to its register view
if 'registration' in settings.INSTALLED_APPS:
from registration.backends.default.views import RegistrationView
register = RegistrationView.as_view(form_class=register_form)
return register(request)
# otherwise simply save form
else:
form3.save()
return HttpResponseRedirect(reverse('registration_complete'))
else:
# perform normal django authentication
form2 = auth_form(data=request.POST)
if form2.is_valid():
login(request, form2.get_user())
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return HttpResponseRedirect(redirect_to)
return render(template_name, {
'form1': form1,
'form2': form2,
'form3': form3,
redirect_field_name: redirect_to,
'msg': request.GET.get('msg','')
}, context_instance=_build_context(request, extra_context=extra_context))
def complete_signin(request, redirect_field_name=REDIRECT_FIELD_NAME,
openid_form=OpenidSigninForm, auth_form=AuthenticationForm,
on_success=signin_success, on_failure=signin_failure,
extra_context=None):
"""
in case of complete signin with openid
:attr request: request object
:attr openid_form: form used for openid signin, by default
`OpenidSigninForm`
:attr auth_form: form object used for legacy authentication,
by default `AuthenticationForm` from django.contrib.auth.
:attr on_success: callable, function used when openid auth succeeds
:attr on_failure: callable, function used when openid auth fails.
:attr extra_context: A dictionary of variables to add to the template
context.
Any callable object in this dictionary will be called to produce the
end result which appears in the context.
"""
return complete(request, on_success, on_failure,
get_url_host(request) + reverse('user_complete_signin'),
redirect_field_name=redirect_field_name, openid_form=openid_form,
auth_form=auth_form, extra_context=extra_context)
def is_association_exist(openid_url):
""" test if an openid is already in database """
is_exist = True
try:
uassoc = UserAssociation.objects.get(openid_url__exact = str(openid_url))
except:
is_exist = False
return is_exist
def register_account(form, _openid):
""" create an account """
user = get_user_model().objects.create_user(form.cleaned_data['username'],
form.cleaned_data['email'])
user.backend = "django.contrib.auth.backends.ModelBackend"
oid_register.send(sender=user, openid=_openid)
return user
@not_authenticated
def register(request, template_name='authopenid/complete.html',
redirect_field_name=REDIRECT_FIELD_NAME,
register_form=OpenidRegisterForm, auth_form=AuthenticationForm,
register_account=register_account, send_email=True,
extra_context=None):
"""
register an openid.
If the user is already a member they can associate their openid with
their account.
A new account can also be created and automatically associated
with the openid.
:attr request: request object
:attr template_name: string, name of template to use,
'authopenid/complete.html' by default
:attr redirect_field_name: string, field name used for redirect. by default
'next'
:attr register_form: form used to create a new account. By default
`OpenidRegisterForm`
:attr auth_form: form object used for legacy authentication,
by default `AuthenticationForm` from django.contrib.auth.
:attr register_account: callback used to create a new account from an openid.
It takes the register_form as a parameter.
:attr send_email: boolean, by default True. If True, an email will be sent
to the user.
:attr extra_context: A dictionary of variables to add to the template
context. Any callable object in this dictionary will be called to produce
the end result which appears in the context.
"""
is_redirect = False
redirect_to = request.REQUEST.get(redirect_field_name, '')
openid_ = request.session.get('openid', None)
if openid_ is None or not openid_:
return HttpResponseRedirect("%s?%s" % (reverse('user_signin'),
urllib.urlencode({
redirect_field_name: redirect_to })))
nickname = ''
email = ''
if openid_.sreg is not None:
nickname = openid_.sreg.get('nickname', '')
email = openid_.sreg.get('email', '')
if openid_.ax is not None and (not nickname or not email):
if openid_.ax.get('http://schema.openid.net/namePerson/friendly', False):
nickname = openid_.ax.get('http://schema.openid.net/namePerson/friendly')[0]
if openid_.ax.get('http://schema.openid.net/contact/email', False):
email = openid_.ax.get('http://schema.openid.net/contact/email')[0]
form1 = register_form(initial={
'username': nickname,
'email': email,
})
form2 = auth_form(initial={
'username': nickname,
})
if request.POST:
user_ = None
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
if 'email' in request.POST.keys():
form1 = register_form(data=request.POST)
if form1.is_valid():
user_ = register_account(form1, openid_)
else:
form2 = auth_form(data=request.POST)
if form2.is_valid():
user_ = form2.get_user()
if user_ is not None:
# associate the user to openid
uassoc = UserAssociation(
openid_url=str(openid_),
user_id=user_.id
)
uassoc.save(send_email=send_email)
login(request, user_)
return HttpResponseRedirect(redirect_to)
return render(template_name, {
'form1': form1,
'form2': form2,
redirect_field_name: redirect_to,
'nickname': nickname,
'email': email
}, context_instance=_build_context(request, extra_context=extra_context))
@login_required
def signout(request, next_page=None, template_name='registration/logged_out.html'):
"""
signout from the website. Remove openid from session and kill it.
:attr request: request object
:attr next_page: default redirect page after logout
:attr template_name: string, name of template to use when next_page isn't set,
'registration/logged_out.html' by default
"""
try:
del request.session['openid']
except KeyError:
pass
next = request.GET.get('next')
logout(request)
if next is not None:
return HttpResponseRedirect(next)
if next_page is None:
return render(template_name, {
'title': _('Logged out')}, context_instance=RequestContext(request))
return HttpResponseRedirect(next_page or request.path)
def xrdf(request, template_name='authopenid/yadis.xrdf'):
""" view used to process the xrdf file"""
url_host = get_url_host(request)
return_to = [
"%s%s" % (url_host, reverse('user_complete_signin'))
]
response = render(template_name, {
'return_to': return_to
}, context_instance=RequestContext(request))
response['Content-Type'] = "application/xrds+xml"
response['X-XRDS-Location']= request.build_absolute_uri(reverse('oid_xrdf'))
return response
@login_required
def password_change(request,
template_name='authopenid/password_change_form.html',
set_password_form=SetPasswordForm,
change_password_form=PasswordChangeForm, post_change_redirect=None,
extra_context=None):
"""
View that allows a user to add a password to their account or change it.
:attr request: request object
:attr template_name: string, name of template to use,
'authopenid/password_change_form.html' by default
:attr set_password_form: form used to create a new password. By default
``django.contrib.auth.forms.SetPasswordForm``
:attr change_password_form: form object used to change the password,
by default ``django.contrib.auth.forms.PasswordChangeForm``
:attr post_change_redirect: url used to redirect the user after the
password change.
:attr extra_context: A dictionary of variables to add to the template context.
Any callable object in this dictionary will be called to produce the
end result which appears in the context.
"""
if post_change_redirect is None:
post_change_redirect = settings.LOGIN_REDIRECT_URL
set_password = False
if request.user.has_usable_password():
change_form = change_password_form
else:
set_password = True
change_form = set_password_form
if request.POST:
form = change_form(request.user, request.POST)
if form.is_valid():
form.save()
msg = urllib.quote(_("Password changed"))
redirect_to = "%s?%s" % (post_change_redirect,
urllib.urlencode({ "msg": msg }))
return HttpResponseRedirect(redirect_to)
else:
form = change_form(request.user)
return render(template_name, {
'form': form,
'set_password': set_password
}, context_instance=_build_context(request, extra_context=extra_context))
@login_required
def associate_failure(request, message,
template_failure="authopenid/associate.html",
openid_form=AssociateOpenID, redirect_name=None,
extra_context=None, **kwargs):
""" function used when new openid association fail"""
return render(template_failure, {
'form': openid_form(request.user),
'msg': message,
}, context_instance=_build_context(request, extra_context=extra_context))
@login_required
def associate_success(request, identity_url, openid_response,
redirect_field_name=REDIRECT_FIELD_NAME, send_email=True, **kwargs):
"""
function used when a new openid association succeeds. Redirects the user.
"""
openid_ = from_openid_response(openid_response)
openids = request.session.get('openids', [])
openids.append(openid_)
request.session['openids'] = openids
uassoc = UserAssociation(
openid_url=str(openid_),
user_id=request.user.id
)
uassoc.save(send_email=send_email)
redirect_to = request.GET.get(redirect_field_name, '')
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(redirect_to)
@login_required
def complete_associate(request, redirect_field_name=REDIRECT_FIELD_NAME,
template_failure='authopenid/associate.html',
openid_form=AssociateOpenID, redirect_name=None,
on_success=associate_success, on_failure=associate_failure,
send_email=True, extra_context=None):
""" in case of complete association with openid """
return complete(request, on_success, on_failure,
get_url_host(request) + reverse('user_complete_associate'),
redirect_field_name=redirect_field_name, openid_form=openid_form,
template_failure=template_failure, redirect_name=redirect_name,
send_email=send_email, extra_context=extra_context)
@login_required
def associate(request, template_name='authopenid/associate.html',
openid_form=AssociateOpenID, redirect_field_name=REDIRECT_FIELD_NAME,
on_failure=associate_failure, extra_context=None):
"""View that allow a user to associate a new openid to its account.
:attr request: request object
:attr template_name: string, name of template to use,
'authopenid/associate.html' by default
:attr openid_form: form used to enter the openid url. By default
``django_authopenid.forms.AssociateOpenID``
:attr redirect_field_name: string, field name used for redirect.
by default 'next'
:attr on_success: callable, function used when openid auth succeeds
:attr on_failure: callable, function used when openid auth fails,
by default ``django_authopenid.views.associate_failure``
:attr extra_context: A dictionary of variables to add to the template
context. A callable object in this dictionary will be called to produce
the end result which appears in the context.
"""
redirect_to = request.REQUEST.get(redirect_field_name, '')
if request.POST:
form = openid_form(request.user, data=request.POST)
if form.is_valid():
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = settings.LOGIN_REDIRECT_URL
redirect_url = "%s%s?%s" % (
get_url_host(request),
reverse('user_complete_associate'),
urllib.urlencode({ redirect_field_name: redirect_to })
)
return ask_openid(request,
form.cleaned_data['openid_url'],
redirect_url,
on_failure=on_failure)
else:
form = openid_form(request.user)
return render(template_name, {
'form': form,
redirect_field_name: redirect_to
}, context_instance=_build_context(request, extra_context=extra_context))
@login_required
def dissociate(request, template_name="authopenid/dissociate.html",
dissociate_form=OpenidDissociateForm,
redirect_field_name=REDIRECT_FIELD_NAME,
default_redirect=settings.LOGIN_REDIRECT_URL, extra_context=None):
""" view used to dissociate an openid from an account """
redirect_to = request.REQUEST.get(redirect_field_name, '')
if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
redirect_to = default_redirect
# get list of associated openids
rels = UserAssociation.objects.filter(user__id=request.user.id)
associated_openids = [rel.openid_url for rel in rels]
if len(associated_openids) == 1 and not request.user.has_usable_password():
msg = _("You can't remove this openid. "
"You should set a password first.")
return HttpResponseRedirect("%s?%s" % (redirect_to,
urllib.urlencode({ "msg": msg })))
if request.POST:
form = dissociate_form(request.POST)
if form.is_valid():
openid_url = form.cleaned_data['openid_url']
msg = ""
if openid_url not in associated_openids:
msg = _("%s is not associated to your account") % openid_url
if not msg:
UserAssociation.objects.get(openid_url__exact=openid_url).delete()
if openid_url == request.session.get('openid_url'):
del request.session['openid_url']
msg = _("openid removed.")
return HttpResponseRedirect("%s?%s" % (redirect_to,
urllib.urlencode({ "msg": msg })))
else:
openid_url = request.GET.get('openid_url', '')
if not openid_url:
msg = _("Invalid OpenID url.")
return HttpResponseRedirect("%s?%s" % (redirect_to,
urllib.urlencode({ "msg": msg })))
form = dissociate_form(initial={ 'openid_url': openid_url })
return render(template_name, {
"form": form,
"openid_url": openid_url
}, context_instance=_build_context(request, extra_context=extra_context))
|
{
"content_hash": "329dfbd1724b80300829fff1ec68b17f",
"timestamp": "",
"source": "github",
"line_count": 644,
"max_line_length": 94,
"avg_line_length": 40.65683229813665,
"alnum_prop": 0.6529045563915518,
"repo_name": "ramusus/django-authopenid",
"id": "23aefae8d52ff051a10b03d1751776174b3137da",
"size": "26826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_authopenid/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3815"
},
{
"name": "HTML",
"bytes": "14318"
},
{
"name": "JavaScript",
"bytes": "7919"
},
{
"name": "Python",
"bytes": "91738"
}
],
"symlink_target": ""
}
|
"""Support for esphome devices."""
import asyncio
import logging
import math
from typing import Any, Callable, Dict, List, Optional
from aioesphomeapi import (
APIClient, APIConnectionError, DeviceInfo, EntityInfo, EntityState,
HomeassistantServiceCall, UserService, UserServiceArgType)
import voluptuous as vol
from homeassistant import const
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_PORT, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import Event, State, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.storage import Store
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
# Import config flow so that it's added to the registry
from .config_flow import EsphomeFlowHandler # noqa
from .entry_data import (
DATA_KEY, DISPATCHER_ON_DEVICE_UPDATE, DISPATCHER_ON_LIST,
DISPATCHER_ON_STATE, DISPATCHER_REMOVE_ENTITY, DISPATCHER_UPDATE_ENTITY,
RuntimeEntryData)
DOMAIN = 'esphome'
_LOGGER = logging.getLogger(__name__)
STORAGE_KEY = 'esphome.{}'
STORAGE_VERSION = 1
# No config schema - only configuration entry
CONFIG_SCHEMA = vol.Schema({}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Stub to allow setting up this component.
Configuration through YAML is not supported at this time.
"""
return True
async def async_setup_entry(hass: HomeAssistantType,
entry: ConfigEntry) -> bool:
"""Set up the esphome component."""
hass.data.setdefault(DATA_KEY, {})
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
password = entry.data[CONF_PASSWORD]
cli = APIClient(hass.loop, host, port, password,
client_info="Home Assistant {}".format(const.__version__))
# Store client in per-config-entry hass.data
store = Store(hass, STORAGE_VERSION, STORAGE_KEY.format(entry.entry_id),
encoder=JSONEncoder)
entry_data = hass.data[DATA_KEY][entry.entry_id] = RuntimeEntryData(
client=cli,
entry_id=entry.entry_id,
store=store,
)
async def on_stop(event: Event) -> None:
"""Cleanup the socket client on HA stop."""
await _cleanup_instance(hass, entry)
entry_data.cleanup_callbacks.append(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_stop)
)
@callback
def async_on_state(state: EntityState) -> None:
"""Send dispatcher updates when a new state is received."""
entry_data.async_update_state(hass, state)
@callback
def async_on_service_call(service: HomeassistantServiceCall) -> None:
"""Call service when user automation in ESPHome config is triggered."""
domain, service_name = service.service.split('.', 1)
service_data = service.data
if service.data_template:
try:
data_template = {key: Template(value) for key, value in
service.data_template.items()}
template.attach(hass, data_template)
service_data.update(template.render_complex(
data_template, service.variables))
except TemplateError as ex:
_LOGGER.error('Error rendering data template: %s', ex)
return
if service.is_event:
# ESPHome uses the service-call packet for both events and service calls
# Ensure the user can only send events of form 'esphome.xyz'
if domain != 'esphome':
_LOGGER.error("Can only generate events under esphome "
"domain!")
return
hass.bus.async_fire(service.service, service_data)
else:
hass.async_create_task(hass.services.async_call(
domain, service_name, service_data, blocking=True))
async def send_home_assistant_state(entity_id: str, _,
new_state: Optional[State]) -> None:
"""Forward Home Assistant states to ESPHome."""
if new_state is None:
return
await cli.send_home_assistant_state(entity_id, new_state.state)
@callback
def async_on_state_subscription(entity_id: str) -> None:
"""Subscribe and forward states for requested entities."""
unsub = async_track_state_change(
hass, entity_id, send_home_assistant_state)
entry_data.disconnect_callbacks.append(unsub)
# Send initial state
hass.async_create_task(send_home_assistant_state(
entity_id, None, hass.states.get(entity_id)))
async def on_login() -> None:
"""Subscribe to states and list entities on successful API login."""
try:
entry_data.device_info = await cli.device_info()
entry_data.available = True
await _async_setup_device_registry(hass, entry,
entry_data.device_info)
entry_data.async_update_device_state(hass)
entity_infos, services = await cli.list_entities_services()
await entry_data.async_update_static_infos(
hass, entry, entity_infos)
await _setup_services(hass, entry_data, services)
await cli.subscribe_states(async_on_state)
await cli.subscribe_service_calls(async_on_service_call)
await cli.subscribe_home_assistant_states(
async_on_state_subscription)
hass.async_create_task(entry_data.async_save_to_store())
except APIConnectionError as err:
_LOGGER.warning("Error getting initial data: %s", err)
# Re-connection logic will trigger after this
await cli.disconnect()
try_connect = await _setup_auto_reconnect_logic(hass, cli, entry, host,
on_login)
async def complete_setup() -> None:
"""Complete the config entry setup."""
infos, services = await entry_data.async_load_from_store()
await entry_data.async_update_static_infos(hass, entry, infos)
await _setup_services(hass, entry_data, services)
# Create connection attempt outside of HA's tracked task in order
# not to delay startup.
hass.loop.create_task(try_connect(is_disconnect=False))
hass.async_create_task(complete_setup())
return True
async def _setup_auto_reconnect_logic(hass: HomeAssistantType,
cli: APIClient,
entry: ConfigEntry, host: str, on_login):
"""Set up the re-connect logic for the API client."""
async def try_connect(tries: int = 0, is_disconnect: bool = True) -> None:
"""Try connecting to the API client. Will retry if not successful."""
if entry.entry_id not in hass.data[DOMAIN]:
# When removing/disconnecting manually
return
data = hass.data[DOMAIN][entry.entry_id] # type: RuntimeEntryData
for disconnect_cb in data.disconnect_callbacks:
disconnect_cb()
data.disconnect_callbacks = []
data.available = False
data.async_update_device_state(hass)
if is_disconnect:
# This can happen often depending on WiFi signal strength.
# Therefore all these connection warnings are logged
# as infos. The "unavailable" logic will still trigger so the
# user knows if the device is not connected.
_LOGGER.info("Disconnected from ESPHome API for %s", host)
if tries != 0:
# If not first re-try, wait and print message
# Cap wait time at 1 minute. This is because while working on the
# device (e.g. soldering stuff), users don't want to have to wait
# a long time for their device to show up in HA again (this was
# mentioned a lot in early feedback)
#
# In the future another API will be set up so that the ESP can
# notify HA of connectivity directly, but for now we'll use a
# really short reconnect interval.
tries = min(tries, 10) # prevent OverflowError
wait_time = int(round(min(1.8**tries, 60.0)))
_LOGGER.info("Trying to reconnect in %s seconds", wait_time)
await asyncio.sleep(wait_time)
try:
await cli.connect(on_stop=try_connect, login=True)
except APIConnectionError as error:
_LOGGER.info("Can't connect to ESPHome API for %s: %s",
host, error)
# Schedule re-connect in event loop in order not to delay HA
# startup. First connect is scheduled in tracked tasks.
data.reconnect_task = hass.loop.create_task(
try_connect(tries + 1, is_disconnect=False))
else:
_LOGGER.info("Successfully connected to %s", host)
hass.async_create_task(on_login())
return try_connect
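# Backoff schedule implied by try_connect above: wait = round(min(1.8**tries, 60)),
# i.e. roughly 2 s after the first retry, 19 s after the fifth, and capped at
# 60 s from the seventh retry onward.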
async def _async_setup_device_registry(hass: HomeAssistantType,
entry: ConfigEntry,
device_info: DeviceInfo):
"""Set up device registry feature for a particular config entry."""
sw_version = device_info.esphome_version
if device_info.compilation_time:
sw_version += ' ({})'.format(device_info.compilation_time)
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={
(dr.CONNECTION_NETWORK_MAC, device_info.mac_address)
},
name=device_info.name,
manufacturer='espressif',
model=device_info.model,
sw_version=sw_version,
)
async def _register_service(hass: HomeAssistantType,
entry_data: RuntimeEntryData,
service: UserService):
service_name = '{}_{}'.format(entry_data.device_info.name, service.name)
schema = {}
for arg in service.args:
schema[vol.Required(arg.name)] = {
UserServiceArgType.BOOL: cv.boolean,
UserServiceArgType.INT: vol.Coerce(int),
UserServiceArgType.FLOAT: vol.Coerce(float),
UserServiceArgType.STRING: cv.string,
UserServiceArgType.BOOL_ARRAY: [cv.boolean],
UserServiceArgType.INT_ARRAY: [vol.Coerce(int)],
UserServiceArgType.FLOAT_ARRAY: [vol.Coerce(float)],
UserServiceArgType.STRING_ARRAY: [cv.string],
}[arg.type_]
async def execute_service(call):
await entry_data.client.execute_service(service, call.data)
hass.services.async_register(DOMAIN, service_name, execute_service,
vol.Schema(schema))
async def _setup_services(hass: HomeAssistantType,
entry_data: RuntimeEntryData,
services: List[UserService]):
old_services = entry_data.services.copy()
to_unregister = []
to_register = []
for service in services:
if service.key in old_services:
# Already exists
matching = old_services.pop(service.key)
if matching != service:
# Need to re-register
to_unregister.append(matching)
to_register.append(service)
else:
# New service
to_register.append(service)
for service in old_services.values():
to_unregister.append(service)
entry_data.services = {serv.key: serv for serv in services}
for service in to_unregister:
service_name = '{}_{}'.format(entry_data.device_info.name,
service.name)
hass.services.async_remove(DOMAIN, service_name)
for service in to_register:
await _register_service(hass, entry_data, service)
async def _cleanup_instance(hass: HomeAssistantType,
entry: ConfigEntry) -> RuntimeEntryData:
"""Cleanup the esphome client if it exists."""
data = hass.data[DATA_KEY].pop(entry.entry_id) # type: RuntimeEntryData
if data.reconnect_task is not None:
data.reconnect_task.cancel()
for disconnect_cb in data.disconnect_callbacks:
disconnect_cb()
for cleanup_callback in data.cleanup_callbacks:
cleanup_callback()
await data.client.disconnect()
return data
async def async_unload_entry(hass: HomeAssistantType,
entry: ConfigEntry) -> bool:
"""Unload an esphome config entry."""
entry_data = await _cleanup_instance(hass, entry)
tasks = []
for platform in entry_data.loaded_platforms:
tasks.append(hass.config_entries.async_forward_entry_unload(
entry, platform))
if tasks:
await asyncio.wait(tasks)
return True
async def platform_async_setup_entry(hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities,
*,
component_key: str,
info_type,
entity_type,
state_type
) -> None:
"""Set up an esphome platform.
This method is in charge of receiving, distributing and storing
info and state updates.
"""
entry_data = hass.data[DOMAIN][entry.entry_id] # type: RuntimeEntryData
entry_data.info[component_key] = {}
entry_data.state[component_key] = {}
@callback
def async_list_entities(infos: List[EntityInfo]):
"""Update entities of this platform when entities are listed."""
old_infos = entry_data.info[component_key]
new_infos = {}
add_entities = []
for info in infos:
if not isinstance(info, info_type):
# Filter out infos that don't belong to this platform.
continue
if info.key in old_infos:
# Update existing entity
old_infos.pop(info.key)
else:
# Create new entity
entity = entity_type(entry.entry_id, component_key, info.key)
add_entities.append(entity)
new_infos[info.key] = info
# Remove old entities
for info in old_infos.values():
entry_data.async_remove_entity(hass, component_key, info.key)
entry_data.info[component_key] = new_infos
async_add_entities(add_entities)
signal = DISPATCHER_ON_LIST.format(entry_id=entry.entry_id)
entry_data.cleanup_callbacks.append(
async_dispatcher_connect(hass, signal, async_list_entities)
)
@callback
def async_entity_state(state: EntityState):
"""Notify the appropriate entity of an updated state."""
if not isinstance(state, state_type):
return
entry_data.state[component_key][state.key] = state
entry_data.async_update_entity(hass, component_key, state.key)
signal = DISPATCHER_ON_STATE.format(entry_id=entry.entry_id)
entry_data.cleanup_callbacks.append(
async_dispatcher_connect(hass, signal, async_entity_state)
)
def esphome_state_property(func):
"""Wrap a state property of an esphome entity.
This checks if the state object in the entity is set, and
prevents writing NAN values to the Home Assistant state machine.
"""
@property
def _wrapper(self):
# pylint: disable=protected-access
if self._state is None:
return None
val = func(self)
if isinstance(val, float) and math.isnan(val):
# Home Assistant doesn't use NAN values in state machine
# (not JSON serializable)
return None
return val
return _wrapper
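# A minimal usage sketch (ExampleSensor is hypothetical, not part of this
# module):
#
#     class ExampleSensor(EsphomeEntity):
#         @esphome_state_property
#         def state(self):
#             return self._state.state
#
# The wrapper returns None until the first state object arrives and filters
# out NaN floats, which are not JSON serializable.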
class EsphomeEnumMapper:
"""Helper class to convert between hass and esphome enum values."""
def __init__(self, func: Callable[[], Dict[int, str]]):
"""Construct a EsphomeEnumMapper."""
self._func = func
def from_esphome(self, value: int) -> str:
"""Convert from an esphome int representation to a hass string."""
return self._func()[value]
def from_hass(self, value: str) -> int:
"""Convert from a hass string to a esphome int representation."""
inverse = {v: k for k, v in self._func().items()}
return inverse[value]
def esphome_map_enum(func: Callable[[], Dict[int, str]]):
"""Map esphome int enum values to hass string constants.
This function has to be used as a decorator; that ensures the aioesphomeapi
import only happens at runtime.
"""
return EsphomeEnumMapper(func)
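# A minimal usage sketch (the FanSpeed mapping is illustrative and not
# asserted to match the real aioesphomeapi enum):
#
#     @esphome_map_enum
#     def _fan_speeds():
#         from aioesphomeapi import FanSpeed
#         return {FanSpeed.LOW: 'low', FanSpeed.HIGH: 'high'}
#
#     _fan_speeds.from_esphome(value)  # esphome int -> hass string
#     _fan_speeds.from_hass('high')    # hass string -> esphome int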
class EsphomeEntity(Entity):
"""Define a generic esphome entity."""
def __init__(self, entry_id: str, component_key: str, key: int):
"""Initialize."""
self._entry_id = entry_id
self._component_key = component_key
self._key = key
self._remove_callbacks = [] # type: List[Callable[[], None]]
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
kwargs = {
'entry_id': self._entry_id,
'component_key': self._component_key,
'key': self._key,
}
self._remove_callbacks.append(
async_dispatcher_connect(self.hass,
DISPATCHER_UPDATE_ENTITY.format(**kwargs),
self._on_update)
)
self._remove_callbacks.append(
async_dispatcher_connect(self.hass,
DISPATCHER_REMOVE_ENTITY.format(**kwargs),
self.async_remove)
)
self._remove_callbacks.append(
async_dispatcher_connect(
self.hass, DISPATCHER_ON_DEVICE_UPDATE.format(**kwargs),
self.async_schedule_update_ha_state)
)
async def _on_update(self) -> None:
"""Update the entity state when state or static info changed."""
self.async_schedule_update_ha_state()
async def async_will_remove_from_hass(self) -> None:
"""Unregister callbacks."""
for remove_callback in self._remove_callbacks:
remove_callback()
self._remove_callbacks = []
@property
def _entry_data(self) -> RuntimeEntryData:
return self.hass.data[DATA_KEY][self._entry_id]
@property
def _static_info(self) -> EntityInfo:
return self._entry_data.info[self._component_key][self._key]
@property
def _device_info(self) -> DeviceInfo:
return self._entry_data.device_info
@property
def _client(self) -> APIClient:
return self._entry_data.client
@property
def _state(self) -> Optional[EntityState]:
try:
return self._entry_data.state[self._component_key][self._key]
except KeyError:
return None
@property
def available(self) -> bool:
"""Return if the entity is available."""
device = self._device_info
if device.has_deep_sleep:
# During deep sleep the ESP will not be connectable (by design)
# For these cases, show it as available
return True
return self._entry_data.available
@property
def unique_id(self) -> Optional[str]:
"""Return a unique id identifying the entity."""
if not self._static_info.unique_id:
return None
return self._static_info.unique_id
@property
def device_info(self) -> Dict[str, Any]:
"""Return device registry information for this entity."""
return {
'connections': {(dr.CONNECTION_NETWORK_MAC,
self._device_info.mac_address)}
}
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._static_info.name
@property
def should_poll(self) -> bool:
"""Disable polling."""
return False
|
{
"content_hash": "c7109282f78080053c5baa9fd21c8b9f",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 79,
"avg_line_length": 38.0348623853211,
"alnum_prop": 0.6076511167928988,
"repo_name": "jabesq/home-assistant",
"id": "db5aeea2aa1faecc06112f6311b7353bc505e316",
"size": "20729",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/esphome/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16238292"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17615"
}
],
"symlink_target": ""
}
|
"""Copyright 2020 The Google Earth Engine Community Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import h5py
import numpy as np
import pandas as pd
l2b_variables_for_l2a = ('local_beam_azimuth', 'local_beam_elevation')
def add_shot_number_breakdown(df: pd.DataFrame) -> None:
"""Adds fields obtained by breaking down shot_number.
Example: from 154341234599141100 we obtain:
orbit_number = 15434
beam_number = 12 (we ignore it)
minor_frame_number = 345
shot_number_within_beam = 99141100
Args:
df: pd.DataFrame
"""
# It's simpler to use substrings than to do math.
df['shot_number_within_beam'] = [
int(str(x)[-8:]) for x in df['shot_number']]
df['minor_frame_number'] = [int(str(x)[-11:-8]) for x in df['shot_number']]
# beam number, [-13:-11], is already in the 'beam' property
df['orbit_number'] = [int(str(x)[:-13]) for x in df['shot_number']]
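# A minimal usage sketch, mirroring the docstring example above:
#
#     df = pd.DataFrame({'shot_number': [154341234599141100]})
#     add_shot_number_breakdown(df)
#     # df['orbit_number'][0]            -> 15434
#     # df['minor_frame_number'][0]      -> 345
#     # df['shot_number_within_beam'][0] -> 99141100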
def hdf_to_df(
hdf_fh: h5py.File, beam_key: str, var: str, df: pd.DataFrame) -> None:
"""Copies data for a single var from an HDF file to a Pandas DataFrame.
Args:
hdf_fh: h5 file handle
beam_key: a string like BEAM0110, first part of the HDF variable key
var: second part of the HDF variable key (also used for the dataframe key)
df: output Pandas DataFrame
"""
if var.startswith('#'):
return
hdf_key = f'{beam_key}/{var}'
df_key = var.split('/')[-1]
ds = hdf_fh[hdf_key]
df[df_key] = ds[:]
if len(df[df_key]) and isinstance(df[df_key][0], bytes):
df[df_key] = df[df_key].apply(lambda x: x.decode())
df[df_key].replace([np.inf, -np.inf], np.nan, inplace=True)
if ds.attrs.get('_FillValue') is not None:
# We need to use pd.NA that works with integer types (np.nan does not)
df[df_key].replace(ds.attrs.get('_FillValue'), pd.NA, inplace=True)
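# A minimal usage sketch (the file name and variable path are illustrative):
#
#     with h5py.File('gedi_granule.h5', 'r') as fh:
#         df = pd.DataFrame()
#         hdf_to_df(fh, 'BEAM0110', 'geolocation/local_beam_azimuth', df)
#
# The DataFrame column is keyed by the last path segment of var, so a nested
# key like 'geolocation/local_beam_azimuth' becomes column 'local_beam_azimuth'.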
|
{
"content_hash": "afe99826321d23acb58cf4192776bbc7",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 78,
"avg_line_length": 35.796875,
"alnum_prop": 0.6870362287210825,
"repo_name": "google/earthengine-community",
"id": "8c4da1c4956efd9ab0c1979106b04491a4e9dc16",
"size": "2291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datasets/gedi_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "60063"
},
{
"name": "Jupyter Notebook",
"bytes": "460074"
},
{
"name": "Python",
"bytes": "72781"
},
{
"name": "Shell",
"bytes": "1935"
}
],
"symlink_target": ""
}
|
'''
CYK algorithm for Context Free Language
Author: Chenguang Zhu
CS154, Stanford University
'''
import sys,traceback
import os
import string
maxProductionNum = 100 #max number of productions
VarNum = 4
production = [[0] * 3 for i in range(maxProductionNum+1)]
'''Productions in Chomsky Normal Form (CNF)
production[i][0] is the number for the variable (0~3, 0: S, 1: A, 2: B, 3: C)
If this production is A->BC (two variables), then production[i][1] and production[i][2] contain the numbers for these two variables
If this production is A->a (a single terminal), then production[i][1] contains the number for the terminal (0 or 1, 0: a, 1: b) and production[i][2]=-1'''
X = [[[False]*3 for i in range(10)] for j in range(10)]
'''X[i][j][s]=True if and only if variable s (0~3, 0: S, 1: A, 2: B, 3: C) is in X_ij as defined in CYK
Suppose the length of the string to be processed is L; then 0<=i<=j<L '''
#check whether (a,b,c) exists in production
def existProd(a, b, c):
global production
for i in range(len(production)):
if ((production[i][0]==a) and
(production[i][1]==b) and
(production[i][2]==c)):
return True
return False
'''CYK algorithm
Calculate the array X
w is the string to be processed'''
def calcCYK(w):
global X
global VarNum
L=len(w)
X=[[[False]*VarNum for i in range(L)] for j in range(L)]
# X=[[[] for i in range(L)] for j in range(L)]
for x in range(L):
calc_cell_basic(x, w)
for dist in range(1,L):
calc_row(dist, L)
tmp = [[lengthify(i) for i in j] for j in X]
X = tmp
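# The recurrence computed above (a sketch; variable numbers follow the
# production encoding at the top of this file):
#   X[i][i] = {A : A -> w[i] is a production}                      (calc_cell_basic)
#   X[i][j] = {A : A -> BC, B in X[i][k], C in X[k+1][j], i<=k<j}  (calc_cell)
# The input w is in the language iff the start variable S (number 0) is in X[0][L-1].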
def calc_row(dist, l):
global X
for i in range(l - dist):
head = i
tail = i + dist
calc_cell(head, tail)
def lengthify(xs):
global VarNum
result = [False] * VarNum
i = 0
for x in xs:
result[i] = x
i += 1
return result
def calc_cell_basic(col, w):
global X
ww = w[col]
poss = [False] * VarNum
for i in range(VarNum):
if existProd(i,ww,-1):
poss[i] = True
X[col][col] = poss
def prod(xs, ys):
result = []
for x in range(len(xs)):
for y in range(len(ys)):
if xs[x] and ys[y]:
for i in range(VarNum):
if existProd(i, x, y):
result.append(i)
return result
def calc_cell(head, tail):
global X
poss = [False] * VarNum
for i in range(tail - head):
xs = X[head][head + i]
ys = X[head + i + 1][tail]
for i in prod(xs, ys):
poss[i] = True
X[head][tail] = poss
def Start(filename):
global X
global VarNum
global production
result=''
#read data case line by line from file
try:
br=open(filename,'r')
#example on Page 8 of lecture 15_CFL5
production=[[0]*3 for i in range(7)]
production[0][0]=0; production[0][1]=1; production[0][2]=2 #S->AB
production[1][0]=1; production[1][1]=2; production[1][2]=3 #A->BC
production[2][0]=1; production[2][1]=0; production[2][2]=-1 #A->a
production[3][0]=2; production[3][1]=1; production[3][2]=3 #B->AC
production[4][0]=2; production[4][1]=1; production[4][2]=-1 #B->b
production[5][0]=3; production[5][1]=0; production[5][2]=-1 #C->a
production[6][0]=3; production[6][1]=1; production[6][2]=-1 #C->b
result=''
#Read File Line By Line
for string in br:
string=string.strip()
print 'Processing '+string+'...'
length=len(string)
w=[0]*length
for i in range(length):
w[i]=ord(string[i])-ord('a') #convert 'a' to 0 and 'b' to 1
#Use CYK algorithm to calculate X
calcCYK(w)
#Get/print the full table X
for step in range(length-1,-1,-1):
for i in range(length-step):
j=i+step
for k in range(VarNum):
if (X[i][j][k]):
result=result+str(k)
result=result+' '
result=result+'\n'
#Close the input stream
br.close()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_exception:"
traceback.print_exception(exc_type, exc_value, exc_traceback,limit=2, file=sys.stdout)
result=result+'error'
return result
def main(filepath):
return Start(filepath)
if __name__ == '__main__':
main(sys.argv[1])
|
{
"content_hash": "18aefdfafdaee932cdb66c4e0ca3c064",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 157,
"avg_line_length": 29.098765432098766,
"alnum_prop": 0.53097157403479,
"repo_name": "shouya/thinking-dumps",
"id": "ad4eae53114e02d025b84e1f0d28b6700f185d33",
"size": "4714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automata/homework/project2/CYK.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Agda",
"bytes": "4987"
},
{
"name": "Coq",
"bytes": "2433428"
},
{
"name": "Dockerfile",
"bytes": "388"
},
{
"name": "Emacs Lisp",
"bytes": "954"
},
{
"name": "HTML",
"bytes": "12397"
},
{
"name": "Haskell",
"bytes": "230181"
},
{
"name": "Idris",
"bytes": "10945"
},
{
"name": "Java",
"bytes": "57778"
},
{
"name": "Jupyter Notebook",
"bytes": "29055"
},
{
"name": "Lua",
"bytes": "777"
},
{
"name": "MATLAB",
"bytes": "232519"
},
{
"name": "Makefile",
"bytes": "1743"
},
{
"name": "Python",
"bytes": "105122"
},
{
"name": "Racket",
"bytes": "101402"
},
{
"name": "Ruby",
"bytes": "2015"
},
{
"name": "Rust",
"bytes": "1318"
},
{
"name": "Scala",
"bytes": "541695"
},
{
"name": "Scheme",
"bytes": "15070"
},
{
"name": "TeX",
"bytes": "92105"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project Compiler
#
import os, sys, re, shutil, time, run, sgmllib, codecs, tempfile, subprocess
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
sys.path.append(os.path.abspath(os.path.join(template_dir,'..')))
sys.path.append(os.path.abspath(os.path.join(template_dir,'..', 'common')))
from tiapp import *
import jspacker
from csspacker import CSSPacker
import traceback
try:
import json
except ImportError:
import simplejson as json
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store', '_svn'];
ignoreDirs = ['iphone', 'android', 'mobileweb', '.git', '.svn', 'CVS'];
HEADER = """
#import <Foundation/Foundation.h>
"""
INTERFACE_HEADER= """
@interface ApplicationRouting : NSObject {
}
+ (NSData*) resolveAppAsset:(NSString*)path;
"""
IMPL_HEADER= """#import "ApplicationRouting.h"
extern NSData* filterDataInRange(NSData* thedata, NSRange range);
@implementation ApplicationRouting
"""
FOOTER ="""
@end
"""
MODULE_IMPL_HEADER = """#import "ApplicationMods.h"
@implementation ApplicationMods
+ (NSArray*) compiledMods
{
NSMutableArray *modules = [NSMutableArray array];
"""
class HTMLParser(sgmllib.SGMLParser):
def parse(self, s):
self.feed(s)
self.close()
def __init__(self, verbose=0):
sgmllib.SGMLParser.__init__(self, verbose)
self.scripts = []
def start_script(self, attributes):
for name, value in attributes:
if name == "src":
self.scripts.append(value)
def get_scripts(self):
return self.scripts
def read_module_properties(dir):
file = os.path.join(dir,'manifest')
dict = {}
if os.path.exists(file):
contents = open(file).read()
for line in contents.splitlines(True):
if line[0:1]=='#': continue
idx = line.find(':')
if idx==-1: continue
k=line[0:idx]
v=line[idx+1:].strip()
dict[k]=v
return dict
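# A sketch of the manifest format parsed above (the keys are illustrative):
#   # lines starting with '#' are skipped
#   name: mymodule
#   version: 1.0
# yields {'name': 'mymodule', 'version': '1.0'}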
#Convert non-unicode obj to unicode encoded in utf-8.
def to_unicode_or_not(obj, encoding='utf-8'):
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
# Need to pre-parse xcconfig files to mangle variable names, and then
# dump them into a map so that we can re-assemble them later
def parse_xcconfig(xcconfig, moduleId, variables):
module_xcconfig = open(xcconfig)
new_xcconfig = ''
local_variables = {}
prefix = moduleId.upper().replace('.','_')
for line in module_xcconfig:
# Strip comments
comment = line.find('//')
if comment != -1:
line = line[0:comment]
# Generate new varname / value pairings
# The regular expression parses a valid line into components
# <var>=<value>
# <var>[<key>=<keyvalue>]=<value>
# e.g.
# OTHER_LDFLAGS=-framework EventKit
# OTHER_LDFLAGS[sdk=iphoneos4*]=-liconv
splitline = re.split('(([^\[=]+)(\[[^\]]+\])?) *=? *(.+)', line)
if len(splitline) >= 5:
varname = splitline[1]
value = splitline[4]
name = prefix + '_' + varname.strip()
name = re.sub(r'[^\w]', '_', name)
local_variables[varname] = name
new_xcconfig += name + '=' + value + '\n'
module_xcconfig.close()
# Update any local variable references with new varname
# and add variables to the global variables map
for (varname, name) in local_variables.iteritems():
source = '$(%s)' % varname
target = '$(%s)' % name
new_xcconfig = new_xcconfig.replace(source,target)
# Add new varname to the list
if not varname in variables:
variables[varname] = [name]
else:
variables[varname].append(name)
new_xcconfig += '\n'
return new_xcconfig
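# A sketch of the transformation above (module id 'com.example.foo' is
# hypothetical): the input line
#   OTHER_LDFLAGS=-framework EventKit
# becomes
#   COM_EXAMPLE_FOO_OTHER_LDFLAGS=-framework EventKit
# and variables['OTHER_LDFLAGS'] gains 'COM_EXAMPLE_FOO_OTHER_LDFLAGS', which
# the caller later folds back in as OTHER_LDFLAGS=$(inherited) $(COM_EXAMPLE_FOO_OTHER_LDFLAGS).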
def softlink_resources(source,target,use_ignoreDirs=True):
if not os.path.exists(target):
os.makedirs(target)
for file in os.listdir(source):
if (use_ignoreDirs and (file in ignoreDirs)) or (file in ignoreFiles):
continue
from_ = to_unicode_or_not(os.path.join(source, file))
to_ = to_unicode_or_not(os.path.join(target, file))
if os.path.isdir(from_):
print "[DEBUG] creating: %s" % (to_)
softlink_resources(from_,to_,use_ignoreDirs)
else:
print "[DEBUG] linking: %s to %s" % (from_,to_)
if os.path.exists(to_):
if os.path.islink(to_):
os.remove(to_)
os.symlink(from_, to_)
else:
os.symlink(from_, to_)
def clear_application_routing(classes_dir):
impf = open(os.path.join(classes_dir,'ApplicationRouting.m'),'w+')
impf.write(HEADER)
impf.write(IMPL_HEADER)
impf.write("+ (NSData*) resolveAppAsset:(NSString*)path;\n{\n")
impf.write(" return nil;\n")
impf.write('}\n')
impf.write(FOOTER)
impf.close()
def softlink_for_simulator(project_dir,app_dir):
resources_dir = os.path.join(project_dir,'Resources')
iphone_resources_dir = os.path.join(resources_dir,'iphone')
iphone_platform_dir = os.path.join(project_dir,'platform','iphone')
softlink_resources(resources_dir,app_dir)
if(os.path.exists(iphone_resources_dir)):
softlink_resources(iphone_resources_dir,app_dir,False)
dest_mod_dir = os.path.join(app_dir,'modules')
src_mod_dir = os.path.join(project_dir,'modules')
if(os.path.exists(src_mod_dir)):
softlink_resources(src_mod_dir,dest_mod_dir)
src_mod_iphone_dir = os.path.join(src_mod_dir,'iphone')
if(os.path.exists(src_mod_iphone_dir)):
softlink_resources(os.path.join(project_dir,'modules','iphone'),dest_mod_dir,False)
iphone_classes_dir = os.path.join(project_dir,'build','iphone','Classes')
clear_application_routing(iphone_classes_dir)
#
# TODO/FIXME
#
# - encryptor
#
class Compiler(object):
def __init__(self, project_dir, appid, name, deploytype):
self.deploytype = deploytype
self.project_dir = project_dir
self.project_name = name
self.appid = appid
if deploytype != 'export-build' and deploytype != 'commonjs':
self.iphone_dir = os.path.join(project_dir,'build','iphone')
else:
self.iphone_dir = project_dir
self.classes_dir = os.path.join(self.iphone_dir,'Classes')
self.assets_dir = os.path.join(self.iphone_dir,'assets')
if deploytype == 'commonjs':
self.assets_dir = os.path.join(self.iphone_dir, '..', 'assets')
self.modules = []
self.modules_metadata = []
self.exports = []
# for now, these are required
self.defines = ['USE_TI_ANALYTICS','USE_TI_NETWORK','USE_TI_PLATFORM','USE_TI_UI', 'USE_TI_API']
def compileProject(self,xcode=False,devicefamily='ios',iphone_version='iphoneos',silent=False,sdk=None):
tiapp_xml = os.path.join(self.project_dir,'tiapp.xml')
ti = TiAppXML(tiapp_xml)
if sdk is None:
sdk_version = os.path.basename(os.path.abspath(os.path.join(template_dir,'../')))
else:
sdk_version = sdk
if xcode:
app_name = os.environ['FULL_PRODUCT_NAME']
app_dir = os.path.join(os.environ['TARGET_BUILD_DIR'],os.environ['CONTENTS_FOLDER_PATH'])
else:
target = 'Debug'
if self.deploytype == 'production':
target = 'Release'
app_name = self.project_name+'.app'
app_folder_name = '%s-iphoneos' % target
app_dir = os.path.abspath(os.path.join(self.iphone_dir,'build',app_folder_name,app_name))
if not silent:
print "[INFO] Titanium SDK version: %s" % sdk_version
print "[INFO] iPhone Device family: %s" % devicefamily
print "[INFO] iPhone SDK version: %s" % iphone_version
if self.deploytype != 'export-build':
main_template_file = os.path.join(template_dir,'main.m')
main_template = codecs.open(main_template_file, encoding='utf-8').read()
main_template = main_template.replace('__PROJECT_NAME__',self.project_name)
main_template = main_template.replace('__PROJECT_ID__',self.appid)
main_template = main_template.replace('__DEPLOYTYPE__',self.deploytype)
main_template = main_template.replace('__APP_ID__',self.appid)
main_template = main_template.replace('__APP_ANALYTICS__',ti.properties['analytics'])
main_template = main_template.replace('__APP_PUBLISHER__',ti.properties['publisher'])
main_template = main_template.replace('__APP_URL__',ti.properties['url'])
main_template = main_template.replace('__APP_NAME__',ti.properties['name'])
main_template = main_template.replace('__APP_VERSION__',ti.properties['version'])
main_template = main_template.replace('__APP_DESCRIPTION__',ti.properties['description'])
main_template = main_template.replace('__APP_COPYRIGHT__',ti.properties['copyright'])
main_template = main_template.replace('__APP_GUID__',ti.properties['guid'])
main_template = main_template.replace('__APP_RESOURCE_DIR__','')
main_template_out = os.path.join(self.iphone_dir,'main.m')
# opening with 'w+' truncates the file, so read the existing contents first
# and only rewrite main.m when they changed (avoids a needless xcode rebuild)
main_file_contents = None
if os.path.exists(main_template_out):
main_file_contents = codecs.open(main_template_out,'r',encoding='utf-8').read()
if main_file_contents!=main_template:
main_file = codecs.open(main_template_out,'w+',encoding='utf-8')
main_file.write(main_template)
main_file.close()
resources_dir = os.path.join(self.project_dir,'Resources')
iphone_resources_dir = os.path.join(resources_dir,'iphone')
iphone_platform_dir = os.path.join(self.project_dir,'platform','iphone')
# copy in any resources in our module like icons
# NOTE: This means that any JS-only modules in the local project
# are hashed up and dumped into the export.
has_modules = False
missing_modules, modules, module_js = ([], [], [])
module_js_dir = os.path.join(self.project_dir,'modules')
if os.path.exists(module_js_dir):
for file in os.listdir(module_js_dir):
if file.endswith('.js'):
module_js.append({'from':os.path.join(module_js_dir,file),'to':os.path.join(app_dir,file),'path':'modules/'+file})
if self.deploytype != 'export-build':
# Have to load the module detection here, in order to
# prevent distributing even MORE stuff in export/transport
sys.path.append(os.path.join(template_dir,'../module'))
from module import ModuleDetector
detector = ModuleDetector(self.project_dir)
missing_modules, modules = detector.find_app_modules(ti, 'iphone', self.deploytype)
# we have to copy these even in simulator given the path difference
if os.path.exists(app_dir):
self.copy_resources([iphone_resources_dir],app_dir,False)
if os.path.exists(app_dir):
self.copy_resources([iphone_platform_dir],app_dir,False)
# generate the includes for all compiled modules
xcconfig_c = "// this is a generated file - DO NOT EDIT\n\n"
if len(modules) > 0:
mods = open(os.path.join(self.classes_dir,'ApplicationMods.m'),'w+')
variables = {}
mods.write(MODULE_IMPL_HEADER)
for module in modules:
if module.js:
# CommonJS module
module_js.append({'from': module.js, 'path': 'modules/' + os.path.basename(module.js)})
module_id = module.manifest.moduleid.lower()
module_name = module.manifest.name.lower()
module_version = module.manifest.version
module_guid = ''
module_licensekey = ''
if module.manifest.has_property('guid'):
module_guid = module.manifest.guid
if module.manifest.has_property('licensekey'):
module_licensekey = module.manifest.licensekey
self.modules_metadata.append({'guid':module_guid,'name':module_name,'id':module_id,'dir':module.path,'version':module_version,'licensekey':module_licensekey})
xcfile = module.get_resource('module.xcconfig')
if os.path.exists(xcfile):
xcconfig_contents = parse_xcconfig(xcfile, module_id, variables)
xcconfig_c += xcconfig_contents
xcfile = os.path.join(self.project_dir,'modules','iphone',"%s.xcconfig" % module_name)
if os.path.exists(xcfile):
xcconfig_contents = parse_xcconfig(xcfile, module_id, variables)
xcconfig_c += xcconfig_contents
mods.write(" [modules addObject:[NSDictionary dictionaryWithObjectsAndKeys:@\"%s\",@\"name\",@\"%s\",@\"moduleid\",@\"%s\",@\"version\",@\"%s\",@\"guid\",@\"%s\",@\"licensekey\",nil]];\n" % (module_name,module_id,module_version,module_guid,module_licensekey));
# Load export symbols from modules...
metadata_path = os.path.join(module.path, 'metadata.json')
if os.path.exists(metadata_path):
self.load_metadata(metadata_path)
mods.write(" return modules;\n")
mods.write("}\n")
mods.write(FOOTER)
mods.close()
for (name, values) in variables.iteritems():
xcconfig_c += name + '=$(inherited) '
for value in values:
xcconfig_c += '$(%s) ' % value
xcconfig_c += '\n'
has_modules = True
xcconfig = os.path.join(self.iphone_dir,"module.xcconfig")
make_xcc = True
if os.path.exists(xcconfig):
existing_xcc = open(xcconfig).read()
# only copy if different so we don't trigger re-compile in xcode
make_xcc = existing_xcc!=xcconfig_c
if make_xcc:
xcconfig = open(xcconfig,'w')
xcconfig.write(xcconfig_c)
xcconfig.close()
#endif deploytype != 'export-build'
else:
# ... And for exported projects, load export symbols from
# the 'metadata' dir.
metadata_dir = os.path.join(self.iphone_dir, 'metadata')
if os.path.isdir(metadata_dir):
for file in os.listdir(metadata_dir):
self.load_metadata(os.path.join(metadata_dir,file))
if self.deploytype=='simulator' or self.deploytype=='export':
shutil.copy(os.path.join(template_dir,'Classes','defines.h'),os.path.join(self.classes_dir,'defines.h'))
if self.deploytype!='development' or has_modules:
if os.path.exists(app_dir) and self.deploytype != 'development':
self.copy_resources([resources_dir],app_dir,self.deploytype != 'test',module_js)
if self.deploytype == 'production':
debugger_plist = os.path.join(app_dir,'debugger.plist')
if os.path.exists(debugger_plist):
os.remove(debugger_plist)
if self.deploytype!='development' and self.deploytype!='export':
defines_file = os.path.join(self.classes_dir, 'defines.h')
defines_header = open(defines_file,'w+')
defines_content = "// Warning: this is generated file. Do not modify!\n\n"
defines_content+= "#define TI_VERSION %s\n"%sdk_version
for sym in self.defines:
defines_content+="#define %s\n" % sym
if defines_content!=defines_header.read():
defines_header.write(defines_content)
defines_header.close()
# deploy any module image files
for module in self.modules:
img_dir = os.path.join(template_dir,'modules',module.lower(),'images')
print "[DEBUG] module image = %s" % img_dir
if not os.path.exists(img_dir): continue
dest_img_dir = os.path.join(app_dir,'modules',module.lower(),'images')
if not os.path.exists(dest_img_dir):
os.makedirs(dest_img_dir)
self.copy_resources([img_dir],dest_img_dir,False)
if self.deploytype!='development' and os.path.exists(app_dir):
# optimize PNGs - since we don't include them in the Resources of the xcodeproj
# the ones we copy in won't get optimized so we need to run it manually
# we can skip this on the simulator but should do it on device
dev_path = "/Developer"
# we need to ask xcode where the root path is
path = run.run(["/usr/bin/xcode-select","-print-path"],True,False)
if path:
dev_path = path.strip()
run.run(["%s/Platforms/iPhoneOS.platform/Developer/usr/bin/iphoneos-optimize"%dev_path,app_dir],False)
# remove empty directories
os.chdir(app_dir)
os.system("find . -type d -empty -delete")
else:
print "[INFO] Skipping JS compile, running from simulator"
if self.deploytype=='development':
softlink_for_simulator(self.project_dir,app_dir)
def compile_module(self):
root_asset = self.compile_commonjs_file(self.appid+'.js', os.path.join(self.assets_dir, self.appid+'.js'))
js_files = []
for root, dirs, files in os.walk(self.assets_dir, True, None, True):
for file in [f for f in files if os.path.splitext(f)[1] == '.js']:
full_path = os.path.join(root, file)
self.compile_js_file(os.path.relpath(full_path, self.assets_dir), full_path, js_files)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
module_assets = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
# Clean up the generated assets
for file in js_files:
os.remove(os.path.join(self.assets_dir, file))
return (root_asset, module_assets)
def load_metadata(self, file):
module_metadata = open(file,'r')
metadata = json.load(module_metadata)
module_metadata.close()
for symbol in metadata['exports']:
self.add_symbol(symbol)
def add_symbol(self,api):
print "[DEBUG] detected symbol: %s" % api
curtoken = ''
tokens = api.split(".")
if tokens[0] not in self.modules:
self.modules.append(tokens[0])
for token in tokens:
curtoken+=token+"."
symbol = 'USE_TI_%s' % (curtoken.replace('.create','').replace('.','').replace('-','_').upper())
if symbol not in self.defines:
self.defines.append(symbol)
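# Illustrative trace (not part of the original source): add_symbol('UI.createWindow')
# registers the module 'UI' and appends the defines USE_TI_UI and
# USE_TI_UIWINDOW ('.create' is stripped before the define is built).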
def extract_tokens(self,sym,line):
# sloppy parsing code
# could be prettier and faster, but it works and is rather reliable
c = 0
tokens = []
search = sym + "."
size = len(search)
while True:
i = line.find(search,c)
if i < 0:
break
found = False
buf = ''
x = 0
for n in line[i+size:]:
# look for a terminal - this could probably be easier
if n in ['(',')','{','}','=',',',' ',':','!','[',']','+','*','/','~','^','%','\n','\t','\r']:
found = True
break
buf+=n
x+=1
tokens.append(buf)
if found:
c = i + x + 1
continue
break
return sorted(set(tokens))
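# Illustrative trace (not part of the original source):
# extract_tokens('Ti', 'Ti.API.info(Ti.UI.createWindow());')
# -> ['API.info', 'UI.createWindow'] (sorted and de-duplicated)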
def compile_js(self,file_contents):
for line in file_contents.split(';'):
for symbol in ('Titanium','Ti'):
for sym in self.extract_tokens(symbol,line):
self.add_symbol(sym)
self.exports.append(sym)
def process_html_files(self,data,source_root):
compile = []
if data.has_key('.js'):
for entry in data['.html']:
html_file = entry['from']
file_contents = open(os.path.expanduser(html_file)).read()
parser = HTMLParser()
parser.parse(file_contents)
# extract all the scripts that are dependencies; we
# don't compile these
scripts = parser.get_scripts()
if len(scripts) > 0:
js_files = data['.js']
for script in scripts:
# if a remote script, ignore
if script.startswith('http:') or script.startswith('https:'):
continue
if script.startswith('app://'):
script = script[6:]
# build a file relative to the html file
fullpath = os.path.abspath(os.path.join(os.path.dirname(html_file),script))
# remove this script from being compiled
for f in js_files:
if f['from']==fullpath:
# target it to be compiled
compile.append(f)
js_files.remove(f)
break
return compile
def compile_js_asset_file(self,path,file):
file_contents = open(os.path.expanduser(file)).read()
if self.deploytype == 'production' or self.deploytype == 'commonjs':
file_contents = jspacker.jsmin(file_contents)
file_contents = file_contents.replace('Titanium.','Ti.')
self.compile_js(file_contents)
path = os.path.join(self.assets_dir,path)
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
tfile = open(path,'w+')
tfile.write(file_contents)
tfile.close()
# TODO: We should remove this when we can "safely" say we no longer support
# versions prior to 2.1, and also change the module loader code in iOS to
# no longer check for moduleAsset.
def compile_commonjs_file(self,path,from_):
js_files = []
self.compile_js_file(path, from_, js_files)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
so = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
return so
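# Illustrative flow (not part of the original source): the relative js paths
# collected in js_files are piped to the bundled titanium_prep binary on
# stdin, one per line, and its stdout (the generated asset source) is what
# gets returned and embedded in the generated code.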
def compile_js_file(self, path, from_, js_files):
print "[DEBUG] compiling: %s" % from_
path = path.replace('.','_')
self.compile_js_asset_file(path,from_)
js_files.append(path)
def copy_resources(self,sources,target,write_routing=True,module_js=[]):
js_files = []
if write_routing:
intf = open(os.path.join(self.classes_dir,'ApplicationRouting.h'),'w+')
impf = open(os.path.join(self.classes_dir,'ApplicationRouting.m'),'w+')
intf.write(HEADER)
intf.write(INTERFACE_HEADER)
impf.write(HEADER)
impf.write(IMPL_HEADER)
impf.write("+ (NSData*) resolveAppAsset:(NSString*)path;\n{\n")
if not os.path.exists(os.path.expanduser(target)):
os.makedirs(os.path.expanduser(target))
if not os.path.exists(self.assets_dir):
os.makedirs(self.assets_dir)
def compile_js_file(path,from_):
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) compiling: %s" % (hour, minute, second, from_)
path = path.replace('.','_')
self.compile_js_asset_file(path,from_)
js_files.append(path)
def compile_js_files():
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) packaging javascript" % (hour, minute, second)
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
titanium_prep = os.path.abspath(os.path.join(template_dir,'titanium_prep'))
cmdinputfile = tempfile.TemporaryFile()
cmdinputfile.write('\n'.join(js_files))
cmdinputfile.seek(0)
so = subprocess.Popen([titanium_prep, self.appid, self.assets_dir], stdin=cmdinputfile,stderr=subprocess.STDOUT,stdout=subprocess.PIPE).communicate()[0]
cmdinputfile.close()
impf.write(so)
year, month, day, hour, minute, second, weekday, yearday, daylight = time.localtime(time.time())
print "[DEBUG] (%02d:%02d:%02d) packaging finished" % (hour, minute, second)
def add_compiled_resources(source,target):
print "[DEBUG] copy resources from %s to %s" % (source,target)
compiled_targets = {}
for root, dirs, files in os.walk(source, True, None, True):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles:
continue
prefix = root[len(source):]
from_ = to_unicode_or_not(os.path.join(root, file))
to_ = os.path.expanduser(from_.replace(source, target, 1))
to_directory = os.path.expanduser(os.path.split(to_)[0])
if not os.path.exists(to_directory):
os.makedirs(to_directory)
fp = os.path.splitext(file)
ext = fp[1]
if ext == '.jss': continue
if len(fp)>1 and ext in ['.html','.js','.css']:
path = prefix + os.sep + file
path = path[1:]
entry = {'path':path,'from':from_,'to':to_}
if compiled_targets.has_key(ext):
compiled_targets[ext].append(entry)
else:
compiled_targets[ext]=[entry]
if not (write_routing and len(fp)>1 and ext in ['.html','.js','.css']):
# only copy if different filesize or doesn't exist
if not os.path.exists(to_) or os.path.getsize(from_)!=os.path.getsize(to_):
print "[DEBUG] copying: %s to %s" % (from_,to_)
shutil.copyfile(from_, to_)
if compiled_targets.has_key('.html'):
compiled = self.process_html_files(compiled_targets,source)
if len(compiled) > 0:
for c in compiled:
from_ = c['from']
to_ = c['to']
path = c['path']
print "[DEBUG] copying: %s to %s" % (from_,to_)
file_contents = open(from_).read()
file_contents = jspacker.jsmin(file_contents)
file_contents = file_contents.replace('Titanium.','Ti.')
to = open(to_,'w')
to.write(file_contents)
to.close()
for ext in ('.css','.html'):
if compiled_targets.has_key(ext):
for css_file in compiled_targets[ext]:
from_ = css_file['from']
to_ = css_file['to']
print "[DEBUG] copying: %s to %s" % (from_,to_)
if ext == '.css':
file_contents = open(from_).read()
packer = CSSPacker(file_contents)
file_contents = packer.pack()
to = open(to_,'w')
to.write(file_contents)
to.close()
else:
shutil.copyfile(from_, to_)
if compiled_targets.has_key('.js'):
for js_file in compiled_targets['.js']:
path = js_file['path']
from_ = js_file['from']
compile_js_file(path, from_)
# copy in any module assets
for metadata in self.modules_metadata:
tp_dir = os.path.join(metadata['dir'],'assets')
if not os.path.exists(tp_dir): continue
tp_id = metadata['id']
t = '%s/modules/%s' %(target,tp_id)
add_compiled_resources(tp_dir,t)
for source in sources:
add_compiled_resources(source,target)
for js_file in module_js:
compile_js_file(js_file['path'], js_file['from'])
if write_routing:
compile_js_files()
impf.write("\tNSNumber *index = [map objectForKey:path];\n")
impf.write("\tif (index == nil) { return nil; }\n")
impf.write("\treturn filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);\n")
impf.write('}\n')
intf.write(FOOTER)
impf.write(FOOTER)
intf.close()
impf.close()
if __name__ == "__main__":
argv = sys.argv
if len(argv) < 3:
print "[USAGE] %s <dir> <deploytype> [devicetype] [ios_version] [sdk_version]" % argv[0]
exit(1)
project_dir = argv[1]
deploytype = argv[2]
if deploytype == 'export-build':
xcode = True
else:
xcode = False
if len(argv) >= 4:
devicefamily = argv[3]
else:
devicefamily = 'unknown'
if len(argv) >= 5:
ios = argv[4]
else:
ios = 'unknown'
if len(argv) >= 6:
sdk = argv[5]
else:
sdk = None
tiapp_xml = os.path.join(project_dir,'tiapp.xml')
ti = TiAppXML(tiapp_xml)
appid = ti.properties['id']
name = ti.properties['name']
c = Compiler(project_dir,appid,name,deploytype)
c.compileProject(xcode,devicefamily,ios,sdk=sdk)
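# Example invocation (illustrative; the argument values are hypothetical and
# follow the usage string above):
#   python compiler.py /path/to/project production iphone 9.3 5.0.0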
|
{
"content_hash": "281e792a27acd4c6adc881c76b9a2446",
"timestamp": "",
"source": "github",
"line_count": 751,
"max_line_length": 265,
"avg_line_length": 34.77230359520639,
"alnum_prop": 0.6650838630619591,
"repo_name": "jvkops/titanium_mobile",
"id": "ad62b36ed7cd2f51d4ceb918ca84a68ffe8ed76c",
"size": "26315",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "support/iphone/compiler.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2552"
},
{
"name": "C",
"bytes": "201981"
},
{
"name": "C#",
"bytes": "80533"
},
{
"name": "C++",
"bytes": "194739"
},
{
"name": "CSS",
"bytes": "16297"
},
{
"name": "HTML",
"bytes": "63276"
},
{
"name": "Java",
"bytes": "2593521"
},
{
"name": "JavaScript",
"bytes": "4243578"
},
{
"name": "Makefile",
"bytes": "7605"
},
{
"name": "Mako",
"bytes": "1855"
},
{
"name": "Objective-C",
"bytes": "3597254"
},
{
"name": "Objective-C++",
"bytes": "8560"
},
{
"name": "Python",
"bytes": "1481917"
},
{
"name": "Shell",
"bytes": "23258"
}
],
"symlink_target": ""
}
|
import numpy as np
import logging
import qmpy
from .renderable import *
logger = logging.getLogger(__name__)
class Point(Renderable):
def __init__(self, coord, label=None, **kwargs):
if isinstance(coord, Point):
self.coord = list(coord.coord)
self.label = coord.label
self.options = coord.options
self.options.update(kwargs)
else:
self.coord = list(coord)
self.label = label
self.options = kwargs
def __eq__(self, other):
if not np.allclose(self.coord, other.coord):
return False
if not self.label == other.label:
return False
return True
@property
def dim(self):
return len(self.coord)
def draw_in_matplotlib(self, **kwargs):
# pop "axes" so it is not forwarded to scatter() as an unknown kwarg
axes = kwargs.pop("axes", None) or plt.gca()
options = dict(self.options)
options.update(kwargs)
axes.scatter(*self.coord, **options)
# if self.label:
# plt.text(x, y, self.label)
def get_flot_series(self, **kwargs):
pc = PointCollection([self], **self.options)
return pc.get_flot_series()
class PointCollection(Renderable):
def __init__(self, points, label=None, fill=False, **kwargs):
self.points = [Point(pt) for pt in points]
self.label = label
self.fill = fill
self.options = kwargs
@property
def as_pairs(self):
return [pt.coord for pt in self.points]
@property
def as_axes(self):
return [[pt.coord[i] for pt in self.points] for i in range(self.dim)]
def draw_in_matplotlib(self, **kwargs):
options = dict(self.options)
options.update(kwargs)
for point in self.points:
point.draw_in_matplotlib(**options)
def get_flot_series(self, **kwargs):
series = {"data": self.as_pairs, "points": {"show": True, "fill": self.fill}}
if self.label:
series["label"] = self.label
series.update(self.options)
series["labels"] = [pt.label for pt in self.points]
series.update(kwargs)
return series
@property
def dim(self):
return min([p.dim for p in self.points])
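# --- Minimal usage sketch (illustrative, not part of the original module) ---
# pts = PointCollection([[0, 0], [1, 2], [2, 1]], label='energies', fill=True)
# series = pts.get_flot_series()
# series['data'] -> [[0, 0], [1, 2], [2, 1]]; series['label'] -> 'energies'
# pts.draw_in_matplotlib()  # scatters each point on the current axes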
|
{
"content_hash": "d79ce5d26cafdabf0d7aeccfaabe2935",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 85,
"avg_line_length": 28.246913580246915,
"alnum_prop": 0.5738636363636364,
"repo_name": "wolverton-research-group/qmpy",
"id": "d5a7fde3b6afd723f28fd7e07cb928b22dfc23ad",
"size": "2288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qmpy/utils/rendering/point.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32296"
},
{
"name": "Cython",
"bytes": "745"
},
{
"name": "GAP",
"bytes": "5012"
},
{
"name": "HTML",
"bytes": "144402"
},
{
"name": "JavaScript",
"bytes": "16825352"
},
{
"name": "PHP",
"bytes": "7301"
},
{
"name": "Python",
"bytes": "800075"
},
{
"name": "Shell",
"bytes": "2784"
}
],
"symlink_target": ""
}
|
from experimental_framework.benchmarks import benchmark_base_class
from experimental_framework.packet_generators \
import dpdk_packet_generator as dpdk
import experimental_framework.common as common
from experimental_framework.constants import framework_parameters as fp
PACKET_SIZE = 'packet_size'
VLAN_SENDER = 'vlan_sender'
VLAN_RECEIVER = 'vlan_receiver'
class RFC2544ThroughputBenchmark(benchmark_base_class.BenchmarkBaseClass):
"""
Calculates the throughput of the VNF under test according to the RFC2544.
"""
def __init__(self, name, params):
benchmark_base_class.BenchmarkBaseClass.__init__(self, name, params)
self.base_dir = common.get_base_dir() + \
fp.EXPERIMENTAL_FRAMEWORK_DIR + fp.DPDK_PKTGEN_DIR
self.results_file = self.base_dir + 'experiment.res'
self.lua_file = self.base_dir + 'rfc2544.lua'
def init(self):
"""
Initialize the benchmark
:return: None
"""
pass
def finalize(self):
"""
:return: None
"""
pass
def get_features(self):
"""
Returns the features associated with the benchmark
:return: features (dict)
"""
features = dict()
features['description'] = 'RFC 2544 Throughput calculation'
features['parameters'] = [PACKET_SIZE, VLAN_SENDER, VLAN_RECEIVER]
features['allowed_values'] = dict()
features['allowed_values'][PACKET_SIZE] = ['64', '128', '256', '512',
'1024', '1280', '1514']
features['allowed_values'][VLAN_SENDER] = map(str, range(-1, 4096))
features['allowed_values'][VLAN_RECEIVER] = map(str, range(-1, 4096))
features['default_values'] = dict()
features['default_values'][PACKET_SIZE] = '1280'
features['default_values'][VLAN_SENDER] = '1007'
features['default_values'][VLAN_RECEIVER] = '1006'
return features
def run(self):
"""
Sends and receives traffic according to the RFC 2544 methodology in
order to measure the throughput of the workload
:return: results of the test case (dict)
"""
packet_size = self._extract_packet_size_from_params()
# Packetgen management
packetgen = dpdk.DpdkPacketGenerator()
self._configure_lua_file()
packetgen.init_dpdk_pktgen(dpdk_interfaces=2,
pcap_file_0='packet_' +
packet_size + '.pcap',
pcap_file_1='igmp.pcap',
lua_script='rfc2544.lua',
vlan_0=self.params[VLAN_SENDER],
vlan_1=self.params[VLAN_RECEIVER])
common.LOG.debug('Start the packet generator - packet size: ' +
str(packet_size))
packetgen.send_traffic()
common.LOG.debug('Stop the packet generator')
return self._get_results()
def _extract_packet_size_from_params(self):
"""
Extracts the packet size from the parameters
:return: packet_size (str)
"""
packet_size = '1280' # default value
if PACKET_SIZE in self.params.keys() and \
isinstance(self.params[PACKET_SIZE], str):
packet_size = self.params[PACKET_SIZE]
return packet_size
def _configure_lua_file(self):
"""
Configure the packet gen to write the results into the right file
:return: None
"""
common.replace_in_file(self.lua_file, 'local out_file = ""',
'local out_file = "' +
self.results_file + '"')
def _reset_lua_file(self):
"""
Sets the out_file variable in the lua file back to its default
:return: None
"""
common.replace_in_file(self.lua_file, 'local out_file = "' +
self.results_file + '"',
'local out_file = ""')
def _get_results(self):
"""
Returns the results of the experiment
:return: results (dict)
"""
throughput = common.get_file_first_line(self.results_file)
ret_val = dict()
try:
ret_val['throughput'] = int(throughput)
except (TypeError, ValueError):
ret_val['throughput'] = 0
return ret_val
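# --- Illustrative driver sketch (not part of the original module) ---
# The parameter keys below come from get_features(); the benchmark name is
# hypothetical and the framework normally instantiates this class itself.
# params = {PACKET_SIZE: '512', VLAN_SENDER: '1007', VLAN_RECEIVER: '1006'}
# bench = RFC2544ThroughputBenchmark('rfc2544_throughput', params)
# bench.init()
# results = bench.run()  # e.g. {'throughput': <value read from experiment.res>}
# bench.finalize()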
|
{
"content_hash": "b86968728d7b8344b6e805f1266f99bc",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 77,
"avg_line_length": 36.47540983606557,
"alnum_prop": 0.5532584269662921,
"repo_name": "dtudares/hello-world",
"id": "9db62e6399f570e2d5eda4481dd667d8caf194b0",
"size": "5063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yardstick/yardstick/vTC/apexlake/experimental_framework/benchmarks/rfc2544_throughput_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1247"
},
{
"name": "Shell",
"bytes": "49990"
}
],
"symlink_target": ""
}
|
# Hive Appier Framework
# Copyright (c) 2008-2015 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2015 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
from . import legacy
from . import exceptions
class Compress(object):
def __init__(self):
self._load_compress()
def load_jsmin(self):
try: import jsmin
except ImportError: self.jsmin = None; return
self.jsmin = jsmin
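# A sketch mirroring the optional-dependency pattern of load_jsmin above;
# it is an assumption filled in here because compress_jpeg below relies on
# self.pil being set, and no loader for it appears in this excerpt.
def load_pil(self):
try: import PIL.Image
except ImportError: self.pil = None; return
self.pil = PIL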
def type_jpeg(self):
return "image/jpeg"
def compress(self, file_path, modified = None, method = None):
# retrieves the modification data from the requested file and
# uses it to construct the complete (cache) key to be used in
# cache try, in case there's a match returns the new file
modified = modified or os.path.getmtime(file_path)
key = "%s:%s" % (method, file_path)
result = self.try_cache(key, modified)
if result: return (len(result), legacy.BytesIO(result))
# in case there's no provided method a proper not found exception
# should be raised so that the end user is notified about the non
# existence of such unset compressor (as expected)
if method is None: raise exceptions.NotFoundError(
message = "Compressor is not defined",
code = 404
)
# in case the compress string is defined, tries to find the proper
# compress method and in case it's not found raises an exception
if not hasattr(self, "compress_" + method): raise exceptions.NotFoundError(
message = "Compressor '%s' not found" % method,
code = 404
)
# retrieves the proper compressor method for the requested compress
# technique, this should be used in a dynamic way enforcing some
# overhead to avoid extra issues while handling with files
compressor = getattr(self, "compress_" + method)
# opens the requested file and reads the complete set of information
# from it closing the file object after the operation is complete
file = open(file_path, "rb")
try: data = file.read()
finally: file.close()
# runs the compressing operation using the target compressor and uses
# the resulting (compressed) data as the value to be returned
result = compressor(data)
# constructs both the size and the file object from the resulting plain
# string data (bytes sequence), these values are considered the result
result_size = len(result)
result_file = legacy.BytesIO(result)
# flags the proper values in the cache so that they may be re-used in case
# the flag remains the same for the key, then returns the resulting tuple
self.flag_cache(key, modified, result)
return (result_size, result_file)
def compress_jpeg(self, data):
if self.pil: return self.compress_jpeg_pil(data)
return self.compress_fallback(data)
def compress_jpeg_pil(self, data, quality = 80):
input = legacy.BytesIO(data)
output = legacy.BytesIO()
image = self.pil.Image.open(input)
image.save(output, format = "jpeg", quality = quality, optimize = True)
output.seek(0, os.SEEK_SET)
data = output.read()
return data
def compress_js(self, data):
if self.jsmin: return self.compress_js_jsmin(data)
return self.compress_fallback(data)
def compress_js_jsmin(self, data):
return self.jsmin.jsmin(data)
def compress_fallback(self, data):
return data
def _load_compress(self):
self.load_jsmin()
self.load_pil()
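# --- Minimal usage sketch (illustrative, not part of this excerpt) ---
# Assumes the cache helpers (try_cache/flag_cache) provided elsewhere in
# appier are available on the instance:
# compress = Compress()
# size, stream = compress.compress("static/js/app.js", method = "js")
# 'stream' is a BytesIO with the minified payload when jsmin is installed;
# otherwise compress_fallback returns the data unchanged.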
|
{
"content_hash": "b447ca5ec571841bdbe620b1ce53787d",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 83,
"avg_line_length": 37.58914728682171,
"alnum_prop": 0.641781810682615,
"repo_name": "JacquesBonet/appier",
"id": "450c29f9fa3549c98f27eec8e83853b75dcb31e1",
"size": "4895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/appier/compress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "987"
},
{
"name": "Python",
"bytes": "470178"
},
{
"name": "Smarty",
"bytes": "1903"
}
],
"symlink_target": ""
}
|
import pytest
import unittest
from modules.sfp_abuseipdb import sfp_abuseipdb
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleAbuseipdb(unittest.TestCase):
def test_opts(self):
module = sfp_abuseipdb()
self.assertEqual(len(module.opts), len(module.optdescs))
def test_setup(self):
sf = SpiderFoot(self.default_options)
module = sfp_abuseipdb()
module.setup(sf, dict())
def test_watchedEvents_should_return_list(self):
module = sfp_abuseipdb()
self.assertIsInstance(module.watchedEvents(), list)
def test_producedEvents_should_return_list(self):
module = sfp_abuseipdb()
self.assertIsInstance(module.producedEvents(), list)
def test_handleEvent_no_api_key_should_set_errorState(self):
sf = SpiderFoot(self.default_options)
module = sfp_abuseipdb()
module.setup(sf, dict())
target_value = 'example target value'
target_type = 'IP_ADDRESS'
target = SpiderFootTarget(target_value, target_type)
module.setTarget(target)
event_type = 'ROOT'
event_data = 'example data'
event_module = ''
source_event = ''
evt = SpiderFootEvent(event_type, event_data, event_module, source_event)
result = module.handleEvent(evt)
self.assertIsNone(result)
self.assertTrue(module.errorState)
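# Run just this module's tests with, e.g.:
#   pytest test/unit/modules/test_sfp_abuseipdb.py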
|
{
"content_hash": "b6f9056aaab494f5c08c2822de56796e",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 81,
"avg_line_length": 30.020408163265305,
"alnum_prop": 0.6689326988443236,
"repo_name": "smicallef/spiderfoot",
"id": "b4fe8e165126afa63eecd3d284426c5519d4f376",
"size": "1471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/modules/test_sfp_abuseipdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9833"
},
{
"name": "Dockerfile",
"bytes": "2779"
},
{
"name": "JavaScript",
"bytes": "34248"
},
{
"name": "Python",
"bytes": "2845553"
},
{
"name": "RobotFramework",
"bytes": "7584"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
}
|
from typing import Optional
from ray_release.config import Test
from ray_release.result import Result
def handle_result(
test: Test,
result: Result,
) -> Optional[str]:
last_update_diff = result.results.get("last_update_diff", float("inf"))
test_name = test["legacy"]["test_name"]
if test_name in [
"actor_deaths",
"many_actor_tasks",
"many_drivers",
"many_tasks",
"many_tasks_serialized_ids",
"node_failures",
"object_spilling_shuffle",
]:
# Core tests
target_update_diff = 300
elif test_name in ["apex", "impala", "many_ppo", "pbt"]:
# Tune/RLlib style tests
target_update_diff = 480
elif test_name in ["serve", "serve_failure"]:
# Serve tests have workload logs every five minutes.
# Leave up to 180 seconds overhead.
target_update_diff = 480
else:
return None
if last_update_diff > target_update_diff:
return (
f"Last update to results json was too long ago "
f"({last_update_diff:.2f} > {target_update_diff})"
)
return None
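# Illustrative example (not part of the original module): a "serve" test whose
# results json was last touched 600 s ago exceeds its 480 s budget, so
# handle_result(...) returns
# "Last update to results json was too long ago (600.00 > 480)"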
|
{
"content_hash": "afceb61993f31f674ac172137f3dd40b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 75,
"avg_line_length": 26.53488372093023,
"alnum_prop": 0.5898334794040315,
"repo_name": "ray-project/ray",
"id": "ffbd02bde9abffe91997b2828345e955ab299e74",
"size": "1141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "release/ray_release/alerts/long_running_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
"""Common runtime ctypes."""
# pylint: disable=invalid-name
import ctypes
import json
import numpy as np
from .base import _LIB, check_call
tvm_shape_index_t = ctypes.c_int64
class ArgTypeCode(object):
"""Type code used in API calls"""
INT = 0
UINT = 1
FLOAT = 2
HANDLE = 3
NULL = 4
TVM_TYPE = 5
DLDEVICE = 6
DLTENSOR_HANDLE = 7
OBJECT_HANDLE = 8
MODULE_HANDLE = 9
PACKED_FUNC_HANDLE = 10
STR = 11
BYTES = 12
NDARRAY_HANDLE = 13
OBJECT_RVALUE_REF_ARG = 14
EXT_BEGIN = 15
class TVMByteArray(ctypes.Structure):
"""Temp data structure for byte array."""
_fields_ = [("data", ctypes.POINTER(ctypes.c_byte)), ("size", ctypes.c_size_t)]
class DataTypeCode(object):
"""DataType code in DLTensor."""
INT = 0
UINT = 1
FLOAT = 2
HANDLE = 3
BFLOAT = 4
class DataType(ctypes.Structure):
"""TVM datatype structure"""
_fields_ = [("type_code", ctypes.c_uint8), ("bits", ctypes.c_uint8), ("lanes", ctypes.c_uint16)]
CODE2STR = {
DataTypeCode.INT: "int",
DataTypeCode.UINT: "uint",
DataTypeCode.FLOAT: "float",
DataTypeCode.HANDLE: "handle",
DataTypeCode.BFLOAT: "bfloat",
}
NUMPY2STR = {
np.dtype(np.bool_): "bool",
np.dtype(np.int8): "int8",
np.dtype(np.int16): "int16",
np.dtype(np.int32): "int32",
np.dtype(np.int64): "int64",
np.dtype(np.uint8): "uint8",
np.dtype(np.uint16): "uint16",
np.dtype(np.uint32): "uint32",
np.dtype(np.uint64): "uint64",
np.dtype(np.float16): "float16",
np.dtype(np.float32): "float32",
np.dtype(np.float64): "float64",
np.dtype(np.float_): "float64",
}
STR2DTYPE = {
"bool": {"type_code": DataTypeCode.UINT, "bits": 1, "lanes": 1},
"int8": {"type_code": DataTypeCode.INT, "bits": 8, "lanes": 1},
"int16": {"type_code": DataTypeCode.INT, "bits": 16, "lanes": 1},
"int32": {"type_code": DataTypeCode.INT, "bits": 32, "lanes": 1},
"int64": {"type_code": DataTypeCode.INT, "bits": 64, "lanes": 1},
"uint8": {"type_code": DataTypeCode.UINT, "bits": 8, "lanes": 1},
"uint16": {"type_code": DataTypeCode.UINT, "bits": 16, "lanes": 1},
"uint32": {"type_code": DataTypeCode.UINT, "bits": 32, "lanes": 1},
"uint64": {"type_code": DataTypeCode.UINT, "bits": 64, "lanes": 1},
"float16": {"type_code": DataTypeCode.FLOAT, "bits": 16, "lanes": 1},
"float32": {"type_code": DataTypeCode.FLOAT, "bits": 32, "lanes": 1},
"float64": {"type_code": DataTypeCode.FLOAT, "bits": 64, "lanes": 1},
}
def __init__(self, type_str):
super(DataType, self).__init__()
numpy_str_map = DataType.NUMPY2STR
if type_str in numpy_str_map:
type_str = numpy_str_map[type_str]
elif isinstance(type_str, np.dtype):
type_str = str(type_str)
assert isinstance(type_str, str)
str_dtype_map = DataType.STR2DTYPE
if type_str in str_dtype_map:
dtype_map = str_dtype_map[type_str]
self.bits = dtype_map["bits"]
self.type_code = dtype_map["type_code"]
self.lanes = dtype_map["lanes"]
return
arr = type_str.split("x")
head = arr[0]
self.lanes = int(arr[1]) if len(arr) > 1 else 1
bits = 32
if head.startswith("int"):
self.type_code = DataTypeCode.INT
head = head[3:]
elif head.startswith("uint"):
self.type_code = DataTypeCode.UINT
head = head[4:]
elif head.startswith("float"):
self.type_code = DataTypeCode.FLOAT
head = head[5:]
elif head.startswith("handle"):
self.type_code = DataTypeCode.HANDLE
bits = 64
head = ""
elif head.startswith("bfloat"):
self.type_code = DataTypeCode.BFLOAT
head = head[6:]
elif head.startswith("custom"):
# pylint: disable=import-outside-toplevel
import tvm.runtime._ffi_api
low, high = head.find("["), head.find("]")
if low == -1 or high == -1 or low >= high:
raise ValueError("Badly formatted custom type string %s" % type_str)
type_name = head[low + 1 : high]
self.type_code = tvm.runtime._ffi_api._datatype_get_type_code(type_name)
head = head[high + 1 :]
else:
raise ValueError("Do not know how to handle type %s" % type_str)
bits = int(head) if head else bits
self.bits = bits
def __repr__(self):
# pylint: disable=import-outside-toplevel
if self.bits == 1 and self.lanes == 1:
return "bool"
if self.type_code in DataType.CODE2STR:
type_name = DataType.CODE2STR[self.type_code]
else:
import tvm.runtime._ffi_api
type_name = "custom[%s]" % tvm.runtime._ffi_api._datatype_get_type_name(self.type_code)
x = "%s%d" % (type_name, self.bits)
if self.lanes != 1:
x += "x%d" % self.lanes
return x
def __eq__(self, other):
return (
self.bits == other.bits
and self.type_code == other.type_code
and self.lanes == other.lanes
)
def __ne__(self, other):
return not self.__eq__(other)
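# Illustrative round trips (not part of the original source):
#   DataType("float32") -> bits=32, lanes=1, repr() == "float32"
#   DataType("int8x4")  -> bits=8,  lanes=4, repr() == "int8x4"
#   DataType("bool")    -> uint with bits=1, repr() == "bool"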
RPC_SESS_MASK = 128
class Device(ctypes.Structure):
"""TVM device strucure.
Typically constructed using convenience function
:meth:`tvm.runtime.device`.
Exposes uniform interface to device-specific APIs such as CUDA or
OpenCL. Some properties may return None depending on whether an
API exposes that particular property.
"""
_fields_ = [("device_type", ctypes.c_int), ("device_id", ctypes.c_int)]
MASK2STR = {
1: "cpu",
2: "cuda",
4: "opencl",
5: "aocl",
7: "vulkan",
8: "metal",
9: "vpi",
10: "rocm",
12: "ext_dev",
14: "hexagon",
15: "webgpu",
}
STR2MASK = {
"llvm": 1,
"stackvm": 1,
"cpu": 1,
"c": 1,
"hybrid": 1,
"composite": 1,
"cuda": 2,
"nvptx": 2,
"cl": 4,
"opencl": 4,
"sdaccel": 4,
"aocl": 5,
"aocl_sw_emu": 5,
"vulkan": 7,
"metal": 8,
"vpi": 9,
"rocm": 10,
"ext_dev": 12,
"hexagon": 14,
"webgpu": 15,
}
def __init__(self, device_type, device_id):
super(Device, self).__init__()
self.device_type = int(device_type)
self.device_id = device_id
def _GetDeviceAttr(self, device_type, device_id, attr_id):
"""Internal helper function to invoke runtime.GetDeviceAttr"""
# pylint: disable=import-outside-toplevel
import tvm.runtime._ffi_api
return tvm.runtime._ffi_api.GetDeviceAttr(device_type, device_id, attr_id)
@property
def exist(self):
"""Whether this device exists.
Returns True if TVM has support for the device, if the
physical device is present, and the device is accessible
through appropriate drivers (e.g. cuda/vulkan).
Returns
-------
exist : bool
True if the device exists
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 0) != 0
@property
def max_threads_per_block(self):
"""Maximum number of threads on each block.
Returns device value for cuda, metal, rocm, opencl, and vulkan
devices. Returns remote device value for RPC devices.
Returns None for all other devices.
Returns
-------
max_threads_per_block : int or None
The number of threads on each block
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 1)
@property
def warp_size(self):
"""Number of threads that execute concurrently.
Returns device value for cuda, rocm, and vulkan. Returns
1 for metal and opencl devices, regardless of the physical
device. Returns remote device value for RPC devices. Returns
None for all other devices.
Returns
-------
warp_size : int or None
Number of threads that execute concurrently
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 2)
@property
def max_shared_memory_per_block(self):
"""Total amount of shared memory per block in bytes.
Returns device value for cuda, rocm, opencl, and vulkan.
Returns remote device value for RPC devices. Returns None for
all other devices.
Returns
-------
max_shared_memory_per_block : int or None
Total amount of shared memory per block in bytes
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 3)
@property
def compute_version(self):
"""Get compute version number as string.
Returns maximum API version (e.g. CUDA/OpenCL/Vulkan)
supported by the device.
Returns device value for cuda, rocm, opencl, and
vulkan. Returns remote device value for RPC devices. Returns
None for all other devices.
Returns
-------
version : str or None
The version string in `major.minor` format.
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 4)
@property
def device_name(self):
"""Return the vendor-specific name of device.
Returns device value for cuda, rocm, opencl, and vulkan.
Returns remote device value for RPC devices. Returns None for
all other devices.
Returns
-------
device_name : str or None
The name of the device.
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 5)
@property
def max_clock_rate(self):
"""Return the max clock frequency of device (kHz).
Returns device value for cuda, rocm, and opencl. Returns
remote device value for RPC devices. Returns None for all
other devices.
Returns
-------
max_clock_rate : int or None
The maximum clock frequency of the device (kHz)
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 6)
@property
def multi_processor_count(self):
"""Return the number of compute units in the device.
Returns device value for cuda, rocm, and opencl. Returns
remote device value for RPC devices. Returns None for all
other devices.
Returns
-------
multi_processor_count : int or None
The number of compute units in the device
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 7)
@property
def max_thread_dimensions(self):
"""Return the maximum size of each thread axis
Returns device value for cuda, rocm, opencl, and vulkan.
Returns remote device value for RPC devices. Returns None for
all other devices.
Returns
-------
dims: List of int, or None
The maximum length of threadIdx.x, threadIdx.y, threadIdx.z
"""
return json.loads(self._GetDeviceAttr(self.device_type, self.device_id, 8))
@property
def api_version(self):
"""Returns version number of the SDK used to compile TVM.
For example, CUDA_VERSION for cuda or VK_HEADER_VERSION for
Vulkan.
Returns device value for cuda, rocm, opencl, and vulkan.
Returns remote device value for RPC devices. Returns None for
all other devices.
Returns
-------
version : int or None
The version of the SDK
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 11)
@property
def driver_version(self):
"""Returns version number of the driver
Returns driver vendor's internal version number.
(e.g. "450.408.256" for nvidia-driver-450)
Returns device value for opencl and vulkan. Returns remote
device value for RPC devices. Returns None for all other
devices.
Returns
-------
version : str or None
The version string in `major.minor.patch` format.
"""
return self._GetDeviceAttr(self.device_type, self.device_id, 12)
def create_raw_stream(self):
"""Create a new runtime stream at the context.
User should free the stream after use.
Returns
-------
stream : TVMStreamHandle
The created runtime stream.
"""
stream = ctypes.c_void_p()
check_call(_LIB.TVMStreamCreate(self.device_type, self.device_id, ctypes.byref(stream)))
return stream
def free_raw_stream(self, stream):
"""Free a created stream handle.
Parameters
----------
stream : TVMStreamHandle
The stream which should to be released.
"""
check_call(_LIB.TVMStreamFree(self.device_type, self.device_id, stream))
def set_raw_stream(self, stream):
"""Set a created stream handle.
Parameters
----------
stream : TVMStreamHandle
The stream which should to be set to the device.
"""
check_call(_LIB.TVMSetStream(self.device_type, self.device_id, stream))
def sync(self, stream=None):
"""Synchronize until jobs finished at the context.
Parameters
----------
stream : TVMStreamHandle
Jobs in this stream should be finished.
"""
check_call(_LIB.TVMSynchronize(self.device_type, self.device_id, stream))
def __eq__(self, other):
return (
isinstance(other, Device)
and self.device_id == other.device_id
and self.device_type == other.device_type
)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(str(self))
def __repr__(self):
if self.device_type >= RPC_SESS_MASK:
tbl_id = self.device_type // RPC_SESS_MASK - 1
dev_type = self.device_type % RPC_SESS_MASK
return "remote[%d]:%s(%d)" % (tbl_id, Device.MASK2STR[dev_type], self.device_id)
return "%s(%d)" % (Device.MASK2STR[self.device_type], self.device_id)
class TVMArray(ctypes.Structure):
"""TVMValue in C API"""
_fields_ = [
("data", ctypes.c_void_p),
("device", Device),
("ndim", ctypes.c_int),
("dtype", DataType),
("shape", ctypes.POINTER(tvm_shape_index_t)),
("strides", ctypes.POINTER(tvm_shape_index_t)),
("byte_offset", ctypes.c_uint64),
]
class ObjectRValueRef:
"""Represent an RValue ref to an object that can be moved.
Parameters
----------
obj : tvm.runtime.Object
The object that this value refers to
"""
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
TVMArrayHandle = ctypes.POINTER(TVMArray)
|
{
"content_hash": "86a62f0132bfa9f11f6b26fd4113529e",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 100,
"avg_line_length": 29.83820662768031,
"alnum_prop": 0.5684327431893905,
"repo_name": "Laurawly/tvm-1",
"id": "03a68e9f9720662b9d4e3d9145150179da8ccab2",
"size": "16092",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/tvm/_ffi/runtime_ctypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4093"
},
{
"name": "C",
"bytes": "351611"
},
{
"name": "C++",
"bytes": "11660999"
},
{
"name": "CMake",
"bytes": "228510"
},
{
"name": "Cuda",
"bytes": "16902"
},
{
"name": "Cython",
"bytes": "28979"
},
{
"name": "Go",
"bytes": "111527"
},
{
"name": "HTML",
"bytes": "2664"
},
{
"name": "Java",
"bytes": "199950"
},
{
"name": "JavaScript",
"bytes": "15305"
},
{
"name": "Makefile",
"bytes": "67149"
},
{
"name": "Objective-C",
"bytes": "24259"
},
{
"name": "Objective-C++",
"bytes": "87655"
},
{
"name": "Python",
"bytes": "16256580"
},
{
"name": "RenderScript",
"bytes": "1895"
},
{
"name": "Rust",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "228674"
},
{
"name": "TypeScript",
"bytes": "94385"
}
],
"symlink_target": ""
}
|
import os
import re
import commands
import traceback
from time import localtime
from glob import glob
from shutil import copy2, rmtree
import Mover as mover
from PilotErrors import PilotErrors
from pUtil import tolog, readpar, isLogfileCopied, isAnalysisJob, removeFiles, getFileGuid, PFCxml, createLockFile, \
getMetadata, returnLogMsg, removeLEDuplicates, getPilotlogFilename, remove, getExeErrors, updateJobState, \
makeJobReport, chdir, addSkippedToPFC, updateMetadata, getJobReport, filterJobReport, timeStamp, \
getPilotstderrFilename, safe_call, updateXMLWithSURLs, putMetadata, getCmtconfig, getExperiment, getSiteInformation, \
timedCommand, updateXMLWithEndpoints
from FileHandling import addToOSTransferDictionary, getOSTransferDictionaryFilename, getOSTransferDictionary, \
getWorkDirSizeFilename, getDirSize, storeWorkDirSize, addToJobReport, getJSONDictionary
from JobState import JobState
from FileState import FileState
from FileStateClient import updateFileState, dumpFileStates
from JobRecovery import JobRecovery
from Configuration import Configuration
class JobLog:
"""
Methods for handling the job log (e.g. postJobLog, updatePandaServer)
"""
# private data members
__error = PilotErrors() # PilotErrors object
def __init__(self):
""" Default initialization """
self.__env = Configuration()
def getLogFileGuid(self, tarFileGuid, logFile, jobId, workdir):
""" return the proper log file guid """
# if for some reason the log file guid is not known (e.g. in a problematic lost job)
# the guid should not be generated by PFCxml below, but be extracted from metadata-<jobId>.xml
_filename = os.path.join(workdir, "metadata-%s.xml" % (jobId))
fileGuid = getFileGuid(_filename, logFile)
if tarFileGuid != fileGuid:
if fileGuid == "":
tolog("!!WARNING!!1500!! Log file guid could not be found in %s" % (_filename))
else:
tolog("!!WARNING!!1500!! Encountered a disprepancy between job.tarFileGuid (value: %s) and %s (value: %s)" %\
(tarFileGuid, _filename, fileGuid))
tarFileGuid = fileGuid
else:
tolog("Log guid same as in metadata file")
if tarFileGuid == "":
tolog("!!WARNING!!1500!! Encountered an empty log file guid")
else:
tolog("Using log file guid: %s" % (tarFileGuid))
return tarFileGuid
def copyLogFile(self, dest, workdir, logFile, newDirNM):
""" copy the log file to a specific directory """
status = False
if dest == "None":
tolog("Log file will not be copied to neither SE nor any other directory")
else:
tolog("Log file will not be copied to SE, but to directory: %s" % (dest))
try:
copy2("%s/%s" % (workdir, logFile), dest)
except Exception, e:
tolog("!!WARNING!!1500!! Exception caught: Could not copy log file %s/%s from %s: %s" %\
(workdir, logFile, dest, str(e)))
status = False
else:
status = True
tolog("Successfully copied log file to destination")
try:
os.remove(logFile)
except Exception, e:
tolog("!!WARNING!!1500!! Exception caught: Could not remove %s: %s (ignore)" % (logFile, str(e)))
pass # ignore, return status True anyway
if os.path.exists(newDirNM):
self.removeTree(newDirNM)
return status
def removeTree(self, _dir):
""" Remove a non-empty directory """
if safe_call(rmtree, _dir):
tolog("Removed directory: %s" % (_dir))
## new sitemovers based implementation
def transferLogFile_new(self, job, site,
experiment, ## unused => job.experiment is used instead
dest=None, ## old workflow?: destination dir: if set, only copy the log file to dest, no other real transfers
jr=False ## job recovery: currently not used?
):
"""
Transfer the log file to regular storage and to the special OS if needed (new site movers implementation)
:return: (status, job) # backward compatible return
note: returning `job` is redundant since it is passed by reference
"""
if dest: # old (debug?) workflow: only copy log file to dest dir if specified
status = self.copyLogFile(dest, site.workdir, job.logFile, job.newDirNM)
state_value = "transferred" if status else "not_transferred"
# update the current file state
updateFileState(job.logFile, site.workdir, job.jobId, mode="file_state", state=state_value)
dumpFileStates(site.workdir, job.jobId)
return status, job
if job.inFiles: # old logic: remove any lingering input files from the work dir
removeFiles(job.workdir, job.inFiles)
# get the log file guid (if not set already)
#job.tarFileGuid = self.getLogFileGuid(job.tarFileGuid, job.logFile, job.jobId, site.workdir)
# create the xml needed for the registration if it doesn't exist already (for a secondary log transfer)
WDTxml = "%s.xml" % job.newDirNM
if not os.path.exists(WDTxml):
PFCxml(job.experiment, WDTxml, fntag="pfn", alog=job.logFile, alogguid=job.tarFileGuid, jr=jr)
else:
tolog("Log XML already exists: %s" % WDTxml)
experiment = job.experiment
_exp = getExperiment(experiment)
os_transfer = _exp.doSpecialLogFileTransfer(eventService=job.eventService, putLogToOS=job.putLogToOS)
if os_transfer: ## do transfer log file to ObjectStore
tolog("Special log transfer: Attempting log file transfer to ObjectStore")
job_work_dir = job.workdir
try:
t0 = os.times()
### fix job.workdir since log files are located outside job dir
job.workdir = site.workdir ### quick hack: FIX ME LATER
rc, pilotErrorDiag, rf, _dummy, filesNormalStageOut, filesAltStageOut = mover.put_data_new(job, site, stageoutTries=self.__env['stageoutretry'], log_transfer=False, special_log_transfer=True, workDir=site.workdir)
#job.filesNormalStageOut += filesNormalStageOut
#job.filesAltStageOut += filesAltStageOut
t1 = os.times()
job.timeStageOutLogSpecial = int(round(t1[4] - t0[4]))
except Exception, e:
t1 = os.times()
job.timeStageOutLogSpecial = int(round(t1[4] - t0[4]))
error = "FAILED to stage out log: %s, trace=%s" % (e, traceback.format_exc())
tolog(error)
pilotErrorDiag = "failed to stageout log: %s" % e
rc = PilotErrors.ERR_PUTFUNCNOCALL
job.workdir = job_work_dir ### quick hack: restore workdir
tolog("Put function [stage-outlog special] returned code: %s" % rc)
if rc:
tolog("WARNING: Failed to transfer log file to special SE (ObjectStore) .. skipped, error=%s" % pilotErrorDiag)
else:
# Update the OS transfer dictionary
for fspec in job.logSpecialData:
if fspec.status == 'transferred':
tolog(" -- INFO: lfn=%s has been successfully transferred to OS: %s, bucket_id=%s" % (fspec.lfn, job.logBucketID, job.logDDMEndpoint))
addToOSTransferDictionary(fspec.lfn, self.__env['pilot_initdir'], job.logBucketID, job.logDDMEndpoint)
# stage-out log file to regular SE
tolog("Attempting log file transfer to primary SE")
job_work_dir = job.workdir
try:
t0 = os.times()
### fix job.workdir since log files are located outside job dir
job.workdir = site.workdir ### quick hack: FIX ME LATER
rc, pilotErrorDiag, rf, _dummy, filesNormalStageOut, filesAltStageOut = mover.put_data_new(job, site, stageoutTries=self.__env['stageoutretry'], log_transfer=True, workDir=site.workdir)
job.filesNormalStageOut += filesNormalStageOut
job.filesAltStageOut += filesAltStageOut
t1 = os.times()
job.timeStageOutLog = int(round(t1[4] - t0[4]))
except Exception, e:
t1 = os.times()
job.timeStageOutLog = int(round(t1[4] - t0[4]))
error = "FAILED to stage out log: %s, trace=%s" % (e, traceback.format_exc())
tolog(error)
pilotErrorDiag = "failed to stageout log: %s" % e
rc = PilotErrors.ERR_PUTFUNCNOCALL
#job.setState(["holding", job.result[1], rc])
#return False, job
## quick hack: restore job workdir
job.workdir = job_work_dir
tolog("Put function [stage-outlog] returned code: %s" % rc)
if rc:
if pilotErrorDiag: # do not overwrite any existing pilotErrorDiag (from a get operation e.g.)
if job.pilotErrorDiag:
job.pilotErrorDiag += "|"
else:
job.pilotErrorDiag = ""
job.pilotErrorDiag += "Log put error: " + pilotErrorDiag
if job.pilotErrorDiag:
job.pilotErrorDiag = job.pilotErrorDiag[-256:]
# check if the job is recoverable?
_state, _msg = "failed", "FAILED"
if PilotErrors.isRecoverableErrorCode(rc) and '(unrecoverable)' not in pilotErrorDiag:
_state, _msg = "holding", "WARNING"
# set the error code for the log transfer only if there was no previous error (e.g. from the get-operation)
if job.result[2] == 0:
job.setState([_state, job.result[1], rc])
tolog("%s: %s" % (_msg, PilotErrors.getErrorStr(rc)))
tolog("%s: Could not transfer log file to primary SE" % (self.__env['errorLabel']))
else:
#job.setState(["finished", 0, 0])
createLockFile(self.__env['jobrec'], site.workdir, lockfile="LOGFILECOPIED_%s" % job.jobId)
# old logic: latereg, not used anymore?
job.log_latereg = "False" # to be deprecated?
job.log_field = None # to be deprecated
# clean up
if os.path.isdir(job.newDirNM):
self.removeTree(job.newDirNM)
try:
os.remove(WDTxml)
tolog("%s removed" % WDTxml)
except Exception, e:
tolog("WARNING: Could not remove %s: %s" % (WDTxml, e))
return (not rc, job)
@mover.use_newmover(transferLogFile_new)
def transferLogFile(self, job, site, experiment, dest=None, jr=False):
""" Transfer the log file to storage """
status = True
# transfer log file to special log SE (CERN via xrdcp)
# get the experiment object
thisExperiment = getExperiment(experiment)
if thisExperiment.doSpecialLogFileTransfer(eventService=job.eventService, putLogToOS=job.putLogToOS):
tolog("Preparing for log file transfer to special SE")
# get the site information object
si = getSiteInformation(experiment)
# first backup some schedconfig fields that need to be modified for the secondary transfer
copytool_org = readpar('copytool')
# temporarily modify the schedconfig fields with values for the secondary SE
tolog("Temporarily modifying queuedata for log file transfer to special SE")
#ec = si.replaceQueuedataField("copytool", "objectstore")
# do log transfer
tolog("Attempting log file transfer to special SE")
ret, job = self.transferActualLogFile(job, site, experiment, dest=dest, jr=jr, specialTransfer=True, copytool="objectstore")
if not ret:
tolog("!!WARNING!!1600!! Could not transfer log file to special SE")
#status = False
else:
# Update the OS transfer dictionary
# Get the OS name identifier and bucket endpoint
os_bucket_id = job.logBucketID
os_ddmendpoint = si.getObjectstoreDDMEndpointFromBucketID(os_bucket_id)
# Add the transferred file to the OS transfer file
addToOSTransferDictionary(job.logFile, self.__env['pilot_initdir'], os_bucket_id, os_ddmendpoint)
# finally restore the modified schedconfig fields
tolog("Restoring queuedata fields")
#ec = si.replaceQueuedataField("copytool", copytool_org)
else:
tolog("Special log file transfer not required")
# register/copy log file
tolog("Attempting log file transfer to primary SE")
ret, job = self.transferActualLogFile(job, site, experiment, dest=dest, jr=jr)
if not ret:
tolog("!!%s!!1600!! Could not transfer log file to primary SE" % (self.__env['errorLabel']))
status = False
return status, job
def transferActualLogFile(self, job, site, experiment, dest=None, jr=False, specialTransfer=False, copytool=None):
"""
Save log tarball in DDM and register it to catalog, or copy it to 'dest'.
the job recovery will use the current site info known by the current pilot
"""
status = True
pilotErrorDiag = ""
N_filesNormalStageOut = 0
N_filesAltStageOut = 0
if not self.__env['jobrec']:
self.__env['errorLabel'] = "FAILED"
# only check for performed log transfer for normal stage-out (not for any special transfers)
if isLogfileCopied(site.workdir, job.jobId) and not specialTransfer:
tolog("Log file already transferred")
return status, job
# only copy log file to dest dir if specified
if dest:
status = self.copyLogFile(dest, site.workdir, job.logFile, job.newDirNM)
# update the current file state
if status:
updateFileState(job.logFile, site.workdir, job.jobId, mode="file_state", state="transferred")
else:
updateFileState(job.logFile, site.workdir, job.jobId, mode="file_state", state="not_transferred")
dumpFileStates(site.workdir, job.jobId)
return status, job
# see if it's an analysis job or not
analyJob = isAnalysisJob(job.trf.split(",")[0])
# remove any lingering input files from the work dir
if len(job.inFiles) > 0:
ec = removeFiles(job.workdir, job.inFiles)
# get the log file guid (if not set already)
job.tarFileGuid = self.getLogFileGuid(job.tarFileGuid, job.logFile, job.jobId, site.workdir)
# the cmtconfig is needed by at least the xrdcp site mover
cmtconfig = getCmtconfig(job.cmtconfig)
# create the xml needed for the registration if it doesn't exist already (for a secondary log transfer)
WDTxml = "%s.xml" % (job.newDirNM)
if not os.path.exists(WDTxml):
guids_status = PFCxml(job.experiment, WDTxml, fntag="pfn", alog=job.logFile, alogguid=job.tarFileGuid, jr=jr)
else:
tolog("Log XML already exists: %s" % (WDTxml))
dblock = job.logDblock
if dblock and dblock != 'NULL' and dblock != ' ':
dsname = dblock
else:
dsname = "%s-%s-%s" % (localtime()[0:3]) # pass it a random name
rmflag = 1
ec = 0
_state = ""
_msg = ""
latereg = False
# determine the file path for special log transfers (can be overwritten in mover_put_data() in case of failure in transfer to primary OS)
if specialTransfer:
logPath, os_bucket_id = self.getLogPath(job.jobId, job.logFile, job.experiment)
if logPath == "":
tolog("!!WARNING!!4444!! Can not continue with special transfer since logPath is not set")
return False, job
tolog("Special log transfer: %s" % (logPath))
else:
logPath = ""
os_bucket_id = -1
try:
rc, pilotErrorDiag, rf, rs, N_filesNormalStageOut, N_filesAltStageOut, os_bucket_id = mover.mover_put_data("xmlcatalog_file:%s" % (WDTxml),
dsname,
site.sitename,
site.computingElement,
analysisJob = analyJob,
testLevel = self.__env['testLevel'],
proxycheck = self.__env['proxycheckFlag'],
pinitdir = self.__env['pilot_initdir'],
datasetDict = None,
outputDir = self.__env['outputDir'],
stageoutTries = self.__env['stageoutretry'],
cmtconfig = cmtconfig,
recoveryWorkDir = site.workdir,
logPath = logPath,
os_bucket_id = os_bucket_id,
copytool=copytool,
job = job,
log_transfer = True # new sitemovers required integration parameter
)
except Exception, e:
rmflag = 0 # don't remove the tarball
status = False
import traceback
if 'format_exc' in traceback.__all__:
trace = traceback.format_exc()
pilotErrorDiag = "Exception caught when saving the log tarball: %s, %s" % (str(e), trace)
else:
tolog("traceback.format_exc() not available in this python version")
pilotErrorDiag = "Exception caught when saving the log tarball: %s" % (str(e))
tolog("!!%s!!1500!! %s" % (self.__env['errorLabel'], pilotErrorDiag))
else:
tolog("mover_put_data finished with EC = %s" % str(rc))
# update transfer numbers in case alt stage-out has been used
if N_filesAltStageOut > 0:
job.filesNormalStageOut += N_filesNormalStageOut # only reported to jobMetrics in case of alt stage-out
job.filesAltStageOut += N_filesAltStageOut
tolog("Updated stage-out numbers:")
tolog("..filesNormalStageOut = %d" % (job.filesNormalStageOut))
tolog(".....filesAltStageOut = %d" % (job.filesAltStageOut))
if rc != 0:
# remove any trailing "\r" or "\n" (there can be two of them)
if rs != None:
rs = rs.rstrip()
tolog("Error string: %s" % (rs))
# ignore failed OS log transfers (this might change if we only store logs in OS:s)
if os_bucket_id != -1 and specialTransfer:
tolog("Ignoring failed special log transfer to OS (resetting log bucket id)")
os_bucket_id = -1
rc = 0
rmflag = 0 # don't remove the tarball
job.result[0] = "holding"
# is the job recoverable?
if self.__error.isRecoverableErrorCode(rc):
_state = "holding"
_msg = "WARNING"
else:
_state = "failed"
_msg = self.__env['errorLabel']
# look for special error in the error string
if rs == "Error: string Limit exceeded 250":
tolog("!!%s!!3000!! Put error: file name string limit exceeded 250" % (_msg))
ec = self.__error.ERR_LRCREGSTRSIZE
else:
ec = rc
else:
# create a weak lock file for the log transfer (but not for any special transfer, ie the log transfer to the special/secondary log area)
if not specialTransfer:
createLockFile(self.__env['jobrec'], site.workdir, lockfile="LOGFILECOPIED_%s" % job.jobId)
# to which OS bucket id was the file transferred to?
if os_bucket_id != -1:
# get the site information object
#si = getSiteInformation(experiment)
job.logBucketID = os_bucket_id #si.getBucketID(os_id, "logs")
tolog("Stored log bucket ID: %s" % (job.logBucketID))
# set the error code for the log transfer only if there was no previous error (e.g. from the get-operation)
if job.result[2] == 0:
job.result[2] = ec
job.pilotErrorDiag = pilotErrorDiag
else:
# there was a previous error
if ec != 0:
# is the new log transfer error of the same type as the earlier error?
if ec == job.result[2]:
tolog("!!WARNING!!1105!! Previous error same as new error: %d" % (ec))
else:
tolog("!!WARNING!!1105!! Previous error (%d) will not be overwritten by the new error (%d)" % (job.result[2], ec))
# ignore holding state for log transfer if previous earlier error was a get error
if job.result[0] == "holding" and not self.__error.isRecoverableErrorCode(job.result[2]):
tolog("!!WARNING!!1105!! Resetting HOLDING to FAILED since the previous error is not recoverable")
job.result[0] = "failed"
# in case the log file could not be registered, store the relevant info in the job state file
if latereg:
job.log_latereg = "True"
job.log_field = rf
else:
job.log_latereg = "False"
job.log_field = None
# tarball is saved to DDM successfully, so remove everything except the log file which might
# still be needed (for creating metadata for failed jobs)
if rmflag == 1:
if os.path.isdir(job.newDirNM):
self.removeTree(job.newDirNM)
try:
os.remove(WDTxml)
except Exception, e:
tolog("!!WARNING!!1500!! Could not remove %s: %s" % (WDTxml, str(e)))
#status = False
else:
tolog("%s removed" % (WDTxml))
elif rmflag == 0: # something bad happened during put, save the tarball on worker node for further debugging
if job.result[0] == 'holding':
tolog("Will leave log file %s for later recovery" % (job.logFile))
status = False
if os.path.isdir(job.newDirNM):
self.removeTree(job.newDirNM)
elif os.path.isdir(job.workdir) and (not job.logFile or job.logFile == ''):
try:
rmtree(job.workdir)
except Exception, e:
tolog("!!WARNING!!1500!! Could not remove %s: %s" % (job.workdir, str(e)))
pass
# do not overwrite any existing pilotErrorDiag (from a get operation e.g.)
if job.pilotErrorDiag:
if pilotErrorDiag:
# append the new message to the existing string, prefixed with the log put error identifier
job.pilotErrorDiag += "|Log put error: " + pilotErrorDiag
else:
if pilotErrorDiag:
job.pilotErrorDiag = "Log put error: " + pilotErrorDiag
return status, job
def buildLogExtracts(self, job, workdir, analyJob):
""" Build the bulk of the log extracts """
error = PilotErrors()
tolog("Building log extracts..")
logMsg = ''
# look for the pandatracerlog.txt file, produced if the user payload attempted any outgoing connections
tracerlog = os.path.join(job.workdir, "pandatracerlog.txt")
if analyJob:
if os.path.exists(tracerlog):
# only add if file is not empty
if os.path.getsize(tracerlog) > 0:
msg = "!!WARNING!!1010!! PandaID=%s had outbound connections" % (job.jobId)
tolog(msg)
logMsg += msg
try:
f = open(tracerlog, "r")
except Exception, e:
tolog("!!WARNING!!1010!! Failed to open log file: %s, %s" % (tracerlog, e))
else:
logMsg += f.read()
f.close()
else:
tolog("Panda tracer log has zero size (no outbound connections detected)")
else:
tolog("Panda tracer log does not exist: %s (ignoring)" % (tracerlog))
# are there any special log messages from the subprocess/payload?
for thisf in job.logMsgFiles:
logMsg += returnLogMsg(logf=thisf) + "\n"
# grep for !!FAILED/WARNING!!NR!! messages in pilotlog.txt
ret = commands.getoutput('grep -e "\!\![A-Z]\+\!\![0-9]\+\!\!" %s | tail -20' % (getPilotlogFilename()))
if ret != "":
logMsg += "- %s -\n" % os.path.basename(getPilotlogFilename())
logMsg += ret + "\n"
# is this a multi-trf job?
nJobs = job.jobPars.count("\n") + 1
# loop over all payload stdout files
for _i in range(nJobs):
_stdout = job.stdout
if nJobs > 1:
_stdout = _stdout.replace(".txt", "_%d.txt" % (_i + 1))
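                # a multi-trf job writes one stdout per trf, e.g. payload_stdout.txt -> payload_stdout_1.txt, payload_stdout_2.txt, ... (names illustrative)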
fname = os.path.join(workdir, _stdout)
if os.path.isfile(fname):
# use the job reports for production jobs
if job.payload == "athena" and not analyJob:
                    # keep only filtered extracts from the first sub-jobs
if _i < nJobs - 1:
                        # only get the error summary for the first trfs
jobReport = filterJobReport(getJobReport(fname))
else:
# get the full job report for the last trf
jobReport = getJobReport(fname)
else:
jobReport = ""
if jobReport != "":
logMsg += jobReport
else:
# old style log extracts
logMsg += '\n\n- Errors from %s (no jobReport) -\n' % (_stdout)
logMsg += commands.getoutput('grep -i error %s | tail -20' % (fname))
tmp = commands.getoutput('grep -i \"Running %s failed\" %s | tail -20' % (job.payload, fname))
if len(tmp) > 0:
logMsg += '\n\n- %s errors from %s -\n' % (job.payload, _stdout)
logMsg += tmp
if job.payload == "athena":
evts = commands.getoutput('grep AthenaEventLoopMgr %s | grep end' % (fname))
evtslist = evts.split("\n")
if len(evtslist) > 1:
logMsg += '\n\n- First event -\n'
logMsg += evtslist[0]
logMsg += '\n\n- Last event -\n'
logMsg += evtslist[-1]
# if payload stdout file is too big (ec 1106), remove the file at this point
if job.result[2] == error.ERR_STDOUTTOOBIG:
try:
os.remove(fname)
except Exception, e:
tolog("!!WARNING!!1999!! Failed to remove file %s: %s" % (fname, str(e)))
else:
tolog("Too large payload stdout file has been removed")
else:
logMsg += "\n(%s/%s does not exist)" % (workdir, _stdout)
# remove duplicated warning/error messages
logMsg = removeLEDuplicates(logMsg)
return logMsg
def getXMLAndWorkdir(self, jr, siteWorkdir, jobWorkdir, newDirNM, jobId):
""" Get the metadata and the relevant workdir """
if jr:
# for lost jobs that can be recovered, the workdir has already been renamed to newDirNM
workdir = newDirNM
tolog("Post job task (job recovery mode) using dir: %s" % (workdir))
# get the metadata
strXML = getMetadata(siteWorkdir, jobId)
else:
workdir = jobWorkdir
tolog("Post job task (normal mode) using dir: %s" % (workdir))
# get the preliminary metadata (file size and checksum not yet set for log file)
strXML = getMetadata(workdir, jobId)
return strXML, workdir
def removeCoreDumps(self, siteWorkdir, workdir):
""" Remove any remaining core dumps so they do not end up in the log tarball """
foundCoreDump = False
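        # core dumps may sit in either the site (pilot) work dir or the job work dir, named either "core" or "core.<pid>"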
coreDumps1 = glob("%s/core.*" % (siteWorkdir))
coreDumps2 = glob("%s/core.*" % (workdir))
coreDumps3 = glob("%s/core" % (siteWorkdir))
coreDumps4 = glob("%s/core" % (workdir))
coreDumps = coreDumps1 + coreDumps2 + coreDumps3 + coreDumps4
if coreDumps:
for coreDump in coreDumps:
tolog("Trying to remove core dump: %s" % str(coreDump))
if not remove([coreDump]):
tolog("!!WARNING!!1600!! Failed to remove core dump")
else:
tolog("Core dump removed")
foundCoreDump = True
return foundCoreDump
def removeSoftLink(self, jobPars, stdout, siteWorkdir):
""" Remove the soft link to the payload stdout """
# is this a multi-trf job?
nJobs = jobPars.count("\n") + 1
for _i in range(nJobs):
_stdout = stdout
if nJobs > 1:
_stdout = _stdout.replace(".txt", "_%d.txt" % (_i + 1))
lnfilename = os.path.join(siteWorkdir, _stdout)
if os.path.exists(lnfilename):
try:
os.remove(lnfilename)
except Exception, e:
tolog("Failed to remove soft link %s: %s" % (lnfilename, str(e)))
else:
tolog("Removed soft link: %s" % (lnfilename))
def removeUnwantedFiles(self, workdir, inFiles, outFiles):
""" Remove unwanted files from work dir prior to tarball creation """
tolog("Removing unwanted files prior to job log creation")
# remove any lingering input files from the work dir
if len(inFiles) > 0:
ec = removeFiles(workdir, inFiles)
# remove any lingering output files from the work dir
if len(outFiles) > 0:
ec = removeFiles(workdir, outFiles)
# remove any lingering athena workDir before creating the tarball
if os.path.exists(os.path.join(workdir, 'workDir')):
tolog("Removing user workDir prior to tarball creation")
try:
rmtree(os.path.join(workdir, 'workDir'))
except Exception, e:
tolog("Failed to remove workDir: %s" % str(e))
def addWantedFiles(self, jobWorkdir, siteWorkdir, jobId, outputFilesXML):
""" Add wanted files to work dir prior to tarball creation """
# add skipped input file info, if any
_skippedfname = os.path.join(jobWorkdir, "skipped.xml")
_updatedfname = os.path.join(jobWorkdir, "metadata-%s.xml" % (jobId))
if os.path.exists(_skippedfname):
ec = addSkippedToPFC(_updatedfname, _skippedfname)
# copy to site dir so it can be reached in updatePandaServer after log creation if necessary
try:
copy2(_skippedfname, siteWorkdir)
except Exception, e:
tolog("!!WARNING!!1600!! Exception caught: Could not copy skipped metadata file to site work dir: %s" % str(e))
else:
tolog("Successfully copied skipped metadata file to site work dir")
else:
tolog("No skipped input files (non DBRelease)")
# Special NG/CERNVM metadata file
fname = os.path.join(jobWorkdir, outputFilesXML)
if os.path.exists(fname):
# copy to site dir so it can be reached after the log has been created below
try:
copy2(fname, siteWorkdir)
except Exception, e:
tolog("!!WARNING!!1600!! Exception caught: Could not copy NG/CERNVM metadata file to site work dir: %s" % str(e))
else:
tolog("Successfully copied NG/CERNVM metadata file to site work dir: %s" % (siteWorkdir))
def createMetadataForOutput(self, workdir, filename, jobId, newDirNM, outputFilesXML):
""" Create the final metadata with file size and checksum of the log tarball """
# add metadata about log file to metadata.xml
from SiteMover import SiteMover
_date = "None"
strXML = ""
tolog("Preparing to create metadata for output files")
# get the file info for the log file
ec, pilotErrorDiag, _fsize, _checksum = \
SiteMover.getLocalFileInfo(os.path.join(workdir, filename), csumtype="adler32", date=_date)
if ec != 0:
tolog("!!WARNING!!2995!! Failed while trying to get the log file info: %d" % (ec))
tolog("fsize=%s" % (_fsize))
tolog("checksum=%s" % (_checksum))
JS = JobState()
_filename = JS.getFilename(workdir, jobId)
if os.path.exists(_filename):
ec, pilotErrorDiag, _fsizeAdditional, _checksumAdditional = \
SiteMover.getLocalFileInfo(_filename, csumtype="adler32", date=_date)
if ec != 0:
tolog("!!WARNING!!2995!! Failed while trying to get the additional file (%s) info: %d" % (os.path.basename(_filename), ec))
_fsizeAdditional = None
_checksumAdditional = None
else:
_fsizeAdditional = None
_checksumAdditional = None
fname = "%s/metadata-%s.xml" % (workdir, jobId)
if os.path.exists(fname):
tolog("Found metadata in site dir: %s" % (workdir))
else:
# backup solution in case metadata has not already been copied into the site work dir
tolog("Metadata not found in site work dir, looking for it in job work dir instead..")
_fname = "%s/metadata-%s.xml" % (newDirNM, jobId)
if os.path.exists(_fname):
tolog("Found metadata in job work dir: %s" % (newDirNM))
try:
copy2(_fname, workdir)
except Exception, e:
tolog("!!WARNING!!2999!! Failed to copy metadata file from job work dir to site work dir: %s" % str(e))
else:
tolog("Successfully copied metadata from job work dir to site work dir")
else:
tolog("!!WARNING!! Metadata not found in job work dir either: %s" % (fname))
# try to read the metadata from the site work dir
if os.path.exists(fname):
ec, _strXML = updateMetadata(fname, _fsize, _checksum)
if ec == 0:
tolog("Added (%s, %s) to metadata file: %s" % (_fsize, _checksum, fname))
if len(_strXML) != 0:
# replace preliminary XML
strXML = _strXML
# strXML now contains all the xml for all output files and log
else:
tolog("!!WARNING!!1601!! updateMetadata() did not return any xml")
else:
tolog("!!WARNING!!1600!! Failed to add metadata: %d" % (ec))
else:
tolog("!!WARNING!!2999!! Failed to find metadata file, expect job to eventually fail with ddm: Adder._updateOutputs() could not get GUID/LFN/MD5/FSIZE")
# add the metadata about log file to special NG/CERNVM file
fname = os.path.join(workdir, outputFilesXML)
if os.path.exists(fname):
# add checksum and file size of log file to the metadata file (OutputFiles.xml) and then transfer it
ec, _strXML = updateMetadata(fname, _fsize, _checksum, format='NG', fsizeAdditional=_fsizeAdditional, checksumAdditional=_checksumAdditional)
if ec == 0:
tolog("Added (%s, %s) to metadata file: %s" % (_fsize, _checksum, fname))
if _fsizeAdditional and _checksumAdditional:
tolog("Added (%s, %s) to metadata file: %s" % (_fsizeAdditional, _checksumAdditional, fname))
# OutputFiles.xml now contains all the xml for all output files and log (and additional file info for CERNVM)
# copy it to the init dir (only necessary for NG not for CERNVM)
# (actually it can be transferred with the mv site mover just like it is done for CERNVM, skip for now)
if os.environ.has_key('Nordugrid_pilot'):
try:
copy2(fname, self.__env['pilot_initdir'])
except Exception, e:
tolog("!!WARNING!!1600!! Exception caught: Could not copy NG metadata file to init dir: %s" % str(e))
else:
tolog("Successfully copied NG metadata file to pilot init dir: %s" % (self.__env['pilot_initdir']))
else:
tolog("updateMetadata returned: %d" % (ec))
return strXML
def addTimingInfo(self, logMsg, timeGetJob, timeStageIn, timeExe, timeStageOut, timeCleanUp):
""" Add timing info to log message """
t = '\n\n- Walltime -\n'
# t1 = timeGetJob # set in pilot.py
# t2 = timeStageIn # set in runJob.py
# t3 = timeExe # set in runJob.py
# t4 = timeStageOut # set in runJob.py (and in pilot.py moveLostOutputFiles() for recovered jobs)
# t5 = timeCleanUp # set in this function
t += 'JobRetrival=%s, StageIn=%s, Execution=%s, StageOut=%s, CleanUp=%s\n' % (timeGetJob, timeStageIn, timeExe, timeStageOut, timeCleanUp)
# keep the walltime info but truncate the log message if necessary
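        # e.g. with a 60-character walltime block the message is cut at 2048 - 60 = 1988 characters, so the combined result never exceeds 2048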
l = len(t)
if len(logMsg) >= 2048 - l:
logMsg = logMsg[:2048-l] + t
else:
logMsg += t
return logMsg
def transferLogExtracts(self, logMsg):
""" Write and transfer log extracts to pilot init dir for Nordugrid """
fname = "log_extracts.txt"
try:
f = open(fname, 'w')
f.write(logMsg)
f.close()
except Exception, e:
tolog("Failed to write log extracts to file: %s" % str(e))
else:
try:
copy2(fname, self.__env['pilot_initdir'])
except Exception, e:
tolog("!!WARNING!!1600!! Exception caught: Could not copy log extracts file to init dir for NG: %s" % str(e))
else:
tolog("Successfully copied log extracts file to pilot init dir for NG: %s" % (self.__env['pilot_initdir']))
def getBenchmarkDictionary(self, workdir, experiment, sitename, queuename, jobId, node):
""" Return the benchmark json dictionary """
benchmark_dictionary = {}
# get the site information object
si = getSiteInformation(experiment)
# get the benchmark dictionary if it exists
filename = si.getBenchmarkFileName(workdir)
if os.path.exists(filename):
benchmark_dictionary = getJSONDictionary(filename)
# remove unwanted information that is either useless or duplicated
if benchmark_dictionary.has_key('metadata'):
# remove later: _dummy = benchmark_dictionary['metadata'].pop('cpuname', None) # duplicated in machine section
_dummy = benchmark_dictionary['metadata'].pop('osdist', None) # duplicated in machine section
_dummy = benchmark_dictionary['metadata'].pop('pnode', None) # duplicated in machine section
_dummy = benchmark_dictionary['metadata'].pop('freetext', None) # unwanted (not set by pilot, still present in dictionary, empty)
_dummy = benchmark_dictionary['metadata'].pop('classification', None) # unwanted
_dummy = benchmark_dictionary['metadata'].pop('UID', None) # unwanted
_dummy = benchmark_dictionary.pop('_id', None) # unwanted
# add additional information to the metadata key
benchmark_dictionary['metadata']['node'] = node.nodename
benchmark_dictionary['metadata']['ATLASSite'] = sitename
benchmark_dictionary['metadata']['PanDAQueue'] = queuename
benchmark_dictionary['metadata']['PanDAID'] = int(jobId)
# add the core info as well
try:
threads_per_core, cores_per_socket, sockets = node.collectCoreInfo()
tolog("Read core info")
tolog("types:%s,%s,%s,%s"%(type(threads_per_core),type(cores_per_socket),type(sockets),type(node.isAVirtualMachine())))
benchmark_dictionary['metadata']['threadsPerCore'] = threads_per_core
benchmark_dictionary['metadata']['coresPerSocket'] = cores_per_socket
benchmark_dictionary['metadata']['sockets'] = sockets
benchmark_dictionary['metadata']['isAVM'] = str(node.isAVirtualMachine()).lower()
tolog("Added core info to benchmark dictionary: threadsPerCore=%d, coresPerSocket=%d, sockets=%d, isAVM=%s" % (threads_per_core, cores_per_socket, sockets, benchmark_dictionary['metadata']['isAVM']))
except Exception, e:
tolog("Caught exception: %s" % e)
# convert from string to int
if benchmark_dictionary['metadata'].has_key('mp_num'):
try:
benchmark_dictionary['metadata']['mp_num'] = int(benchmark_dictionary['metadata']['mp_num'])
except:
pass
# rename
benchmark_dictionary['timestamp'] = benchmark_dictionary.pop('_timestamp', None)
# convert from string to float
if benchmark_dictionary.has_key('profiles'):
if benchmark_dictionary['profiles'].has_key('whetstone'):
try:
benchmark_dictionary['profiles']['whetstone']['score'] = float(benchmark_dictionary['profiles']['whetstone']['score'])
except:
pass
if benchmark_dictionary['profiles'].has_key('fastBmk'):
try:
benchmark_dictionary['profiles']['fastBmk']['value'] = float(benchmark_dictionary['profiles']['fastBmk']['value'])
except:
pass
return benchmark_dictionary
def postJobTask(self, job, site, experiment, workerNode, jr=False, ra=0, stdout_tail=None, stdout_path=None):
"""
Update Panda server with output info (xml) and make/save the tarball of the job workdir,
only for finished or failed jobs.
jr = job recovery
ra = recovery attempt
"""
tc_0 = os.times()
# get the metadata and the relevant workdir
strXML, workdir = self.getXMLAndWorkdir(jr, site.workdir, job.workdir, job.newDirNM, job.jobId)
stagedOutES = None
if job.eventService:
try:
from json import load
f = os.path.join(job.workdir, "metadata_stagedOut_ES_%s.json" % job.jobId)
tolog("loading staged out es files status from local file: %s" % f)
if os.path.exists(f):
with open(f, 'r') as fb:
stagedOutES = load(fb)
else:
tolog("Stagedout ES status file (%s) doesn't exist" % f)
except:
tolog("Failed to load staged out es files status from local file: %s" % traceback.format_exc())
stagedOutES = None
# was the benchmark suite executed? if so, get the output dictionary and add it to the machine section of the jobReport
benchmark_dictionary = self.getBenchmarkDictionary(workdir, experiment, site.sitename, site.computingElement, job.jobId, workerNode)
if benchmark_dictionary != {}:
# Send the benchmark dictionary to ES (intermediary service)
benchmark_dictionary['type'] = 'BenchmarkData'
url = "http://uct2-collectd.mwt2.org:8080"
cmd = "curl --connect-timeout 20 --max-time 120 -H \"Content-Type: application/json\" -X POST -d \'%s\' %s" % (str(benchmark_dictionary).replace("'", '"'), url)
tolog("Executing command: %s" % (cmd))
try:
ret, output = commands.getstatusoutput(cmd)
except Exception, e:
tolog("!!WARNING!!1999!! Failed with curl command: %s" % str(e))
# Now remove the cpuname since it is repeated in the jobReport machine section (it was needed for ES)
if benchmark_dictionary.has_key('metadata'):
_dummy = benchmark_dictionary['metadata'].pop('cpuname', None)
# Add the dictionary to the jobReport
addToJobReport(workdir, "benchmark", benchmark_dictionary, section="resource", subsection="machine")
        # set any holding job to failed for sites that do not use job recovery (e.g. LSF sites, which immediately
        # remove the work directory when the LSF job finishes, making job recovery impossible)
if not self.__env['jobrec']:
if job.result[0] == 'holding':
job.result[0] = 'failed'
tolog("This site does not support job recovery: HOLDING state reset to FAILED")
# is it a user analysis job?
#analyJob = self.isAnalyJob(site.sitename)
analyJob = isAnalysisJob(job.trf.split(",")[0])
# build log extracts
logMsg = self.buildLogExtracts(job, workdir, analyJob)
# get the experiment object
thisExperiment = getExperiment(experiment)
# remove known redundant files and directories
thisExperiment.removeRedundantFiles(workdir)
# remove the soft link to the payload stdout
self.removeSoftLink(job.jobPars, job.stdout, site.workdir)
# make the job workdir tarball
chdir(site.workdir) # into pilot workdir, one level above job workdir
# remove the core dump file first, since it's considered as useless
foundCoreDump = self.removeCoreDumps(site.workdir, workdir)
# tar the workdir using the Panda jobId index and move it to the log dir
        if os.path.isdir(workdir) and job.logFile:
# dump all directories for a failed job to the log
if job.result[0] == "failed":
cmd = 'ls -altrR %s' % workdir
tolog("%s: %s" % (cmd + '\n', commands.getoutput(cmd)))
# use the jobInfo.xml to get the trf errors (unless they were read and set already)
# (running a testEvgen job will not produce any exeError messages)
if job.exeErrorCode == 0 and job.exeErrorDiag == "":
try:
job.exeErrorCode, job.exeErrorDiag = getExeErrors(workdir, "jobInfo.xml")
except Exception, e:
tolog("!!WARNING!!1600!! Could not get the exeErrors: %s" % str(e))
job.exeErrorCode, job.exeErrorDiag = 0, ""
else:
tolog("Skipping old style trf error XML file (jobInfo.xml) since TRF errors are already set")
if not jr:
# Make the job summary report
makeJobReport(job, logMsg, foundCoreDump, self.__env['version'], self.__env['jobIds'])
# overwrite any pilotErrorDiag at this point with exeErrorDiag if set
# (for the job page error info)
if job.exeErrorDiag != "" and job.exeErrorDiag != "OK":
# this is probably useless since pilotErrorDiag might be overwritten again later
tolog("Overwriting pilotErrorDiag (\'%s\') with exeErrorDiag (\'%s\')" % (job.pilotErrorDiag, job.exeErrorDiag))
job.pilotErrorDiag = job.exeErrorDiag
# reset the trf errors since the monitor refuses to display them at the moment
#job.exeErrorDiag = ""
#job.exeErrorCode = 0
# remove unwanted files from work dir prior to tarball creation
self.removeUnwantedFiles(job.workdir, job.inFiles, job.outFiles)
# add wanted files to work dir prior to tarball creation
self.addWantedFiles(job.workdir, site.workdir, job.jobId, job.outputFilesXML)
if not job.newDirNM:
job.newDirNM = "tarball_PandaJob_%s_%s" % (job.jobId, site.sitename)
# # restore the hidden proxy if necessary
# try:
# restoreProxy()
# except Exception, e:
# tolog("Pilot failed to restore the proxy: %s" % str(e))
tolog("Preparing to create log file")
# protect the work dir until the log has been registered
createLockFile(self.__env['jobrec'], site.workdir)
# create log file and register it
if not self.createLogFile(job):
tolog("!!WARNING!!1600!! Could not create log file")
else:
# update the current file state
updateFileState(job.logFile, site.workdir, job.jobId, mode="file_state", state="created")
dumpFileStates(site.workdir, job.jobId)
# create the final metadata.xml
if not jr and job.result[0] != "failed":
strXML = self.createMetadataForOutput(site.workdir, job.logFile, job.jobId, job.newDirNM, job.outputFilesXML)
# create metadata later (in updatePandaServer) for the log at least, if it doesn't exist already
if (strXML == "" or strXML == None) and job.result[0] == 'failed':
tolog("metadata will be created for the log only in updatePandaServer")
# update the job state file
JR = JobRecovery()
if job.jobState != "stageout":
job.jobState = "stageout"
_retjs = JR.updateJobStateTest(job, site, workerNode, mode="test")
# register/copy log file
try:
ret, job = self.transferLogFile(job, site, experiment, dest=self.__env['logFileDir'], jr=jr)
except:
tolog("Failed to transfer log file: %s" % traceback.format_exc())
ret = False
if not ret:
tolog("!!%s!!1600!! Could not transfer log file" % (self.__env['errorLabel']))
job.result[0] = "holding"
else:
# the log file has been created and transferred, so it's now safe to remove the lock file
# as long as output files have been moved to local SE. It will also be removed for
# non-recoverable errors (such as 1150 = looping job, etc)
error = PilotErrors()
if not error.isPutErrorCode(job.result[2]):
self.removeLockFile(site.workdir)
else:
tolog("!!WARNING!!1600!! Job failed with EC %d - lock file will not be removed (job might be recovered by a later pilot)" % job.result[2])
# update the job state file
job.jobState = job.result[0]
_retjs = JR.updateJobStateTest(job, site, workerNode, mode="test")
tc_1 = os.times()
job.timeCleanUp = int(round(tc_1[4]-tc_0[4]))
# add timing info to log message
logMsg = self.addTimingInfo(logMsg, job.timeGetJob, job.timeStageIn, job.timeExe, job.timeStageOut, job.timeCleanUp)
# write and transfer log extracts to pilot init dir for Nordugrid
if os.environ.has_key('Nordugrid_pilot') and job.result[0] == 'failed':
self.transferLogExtracts(logMsg)
# update the SURLs info
if strXML and strXML != "":
tolog("Updating metadata XML with SURLs prior to PanDA server update")
strXML = updateXMLWithSURLs(experiment, strXML, site.workdir, job.jobId, self.__env['jobrec']) # do not use format 'NG' here (even for NG)
# was the log file transferred to an OS? check in the OS transfer dictionary
tolog("job.logBucketID: %s" % job.logBucketID)
if job.logBucketID != -1:
# get the corresponding ddm endpoint
si = getSiteInformation(experiment)
os_ddmendpoint = si.getObjectstoreDDMEndpointFromBucketID(job.logBucketID)
strXML = updateXMLWithEndpoints(strXML, [job.logFile], [os_ddmendpoint])
else:
strXML = updateXMLWithEndpoints(strXML, [job.logFile], [None])
tolog("Updated XML:\n%s" % (strXML))
# replace the metadata-<jobId>.xml file
if putMetadata(site.workdir, job.jobId, strXML):
tolog("Successfully updated metadata file")
# get the experiment object
thisExperiment = getExperiment(experiment)
# get experiment specific metadata
try:
expSpecificMetadata = thisExperiment.getExpSpecificMetadata(job, workdir)
except Exception, e:
tolog("!!WARNING!!1211!! Caught exception in getAdditionalMetadata: %s" % (e))
expSpecificMetadata = ""
# update panda server
ret, retNode = self.updatePandaServer(job, site, workerNode, self.__env['psport'],
xmlstr = strXML, log = logMsg, ra = ra, jr = jr,
schedulerID = self.__env['jobSchedulerId'],
pilotID = self.__env['pilotId'],
updateServer = self.__env['updateServerFlag'],
stdout_tail = stdout_tail,
stdout_path = stdout_path,
# stdout_tail = self.__env['stdout_tail'],
# stdout_path = self.__env['stdout_path'],
additionalMetadata = expSpecificMetadata)
if ret == 0:
tolog("Successfully updated panda server at %s" % timeStamp())
if not (os.environ.has_key('Nordugrid_pilot') or site.sitename == 'CERNVM'):
# remove the job state file for finished and failed jobs (recovery will never be necessary for them)
error = PilotErrors()
recoverable = error.isRecoverableErrorCode(job.result[2])
if job.result[0] == "finished" or (job.result[0] == "failed" and not recoverable) or \
job.result[1] != 0 or job.finalstate == "failed":
JS = JobState()
if JS.remove(site, job):
tolog("Removed job state file")
if retNode:
# store the metadata xml
retNode['xml'] = strXML
tolog("Stored XML in retNode structure")
_retjs = updateJobState(job, site, retNode)
if _retjs:
tolog("Backed up XML in job state file")
else:
tolog("updatePandaServer did not return a node structure. XML is assumed to have been sent to the server.")
else:
            # if there is a server update problem at this point the job will eventually lose its heartbeat
tolog("!!WARNING!!1600!! updatePandaServer returned a %d" % (ret))
# protect the work dir until the next pilot picks up the job state file
# and properly updates the job status
# create a weak lock file to prevent cleanup from deleting the work directory
createLockFile(self.__env['jobrec'], site.workdir)
if retNode:
# store the metadata xml
retNode['xml'] = strXML
tolog("Stored XML in retNode structure")
# update the job state file with the new state information
job.result[0] = "lostheartbeat"
_retjs = updateJobState(job, site, retNode)
else:
tolog("updatePandaServer did not return a node structure. XML is assumed to have been sent to the server.")
# add the log extracts to the batch log
if logMsg != "":
tolog("Begin log extracts.......................................................................................")
tolog(logMsg)
tolog(".........................................................................................end log extracts")
else:
tolog("No available log extracts")
def updatePandaServer(self, job, site, workerNode, port, xmlstr = None, spaceReport = False,
log = None, ra = 0, jr = False, schedulerID = None, pilotID = None,
updateServer = True, stdout_tail = "", stdout_path = "", additionalMetadata = None):
""" Update the PanDA server """
# create and instantiate the client object
from PandaServerClient import PandaServerClient
client = PandaServerClient(pilot_version = self.__env['version'],
pilot_version_tag = self.__env['pilot_version_tag'],
pilot_initdir = self.__env['pilot_initdir'],
jobSchedulerId = self.__env['jobSchedulerId'],
pilotId = self.__env['pilotId'],
updateServer = self.__env['updateServerFlag'],
jobrec = self.__env['jobrec'],
pshttpurl = self.__env['pshttpurl'])
# update the panda server
return client.updatePandaServer(job, site, workerNode, port,
xmlstr = xmlstr, spaceReport = spaceReport, log = log, ra = ra, jr = jr,
useCoPilot = self.__env['useCoPilot'],
stdout_tail = stdout_tail, stdout_path = stdout_path, additionalMetadata = additionalMetadata)
def createLogFile(self, job):
""" Create the log file; rename the workdir, tar and zip it """
status = False
# copy pilotlog.txt to workdir before tar
try:
copy2(getPilotlogFilename(), job.workdir)
except Exception,e:
tolog("!!WARNING!!1400!! Could not copy pilot log to workdir: %s" % str(e))
# copy stderr to workdir before tar
try:
copy2(getPilotstderrFilename(), job.workdir)
except Exception,e:
tolog("!!WARNING!!1400!! Could not copy stderr to workdir: %s" % str(e))
# has the workdir size dictionary been created? (probably not if the job is less than ten minutes old)
workdirsize_filepath = os.path.join(job.workdir, getWorkDirSizeFilename(job.jobId))
if os.path.exists(workdirsize_filepath):
tolog("Work directory size dictionary already created: %s" % (workdirsize_filepath))
else:
tolog("Work directory size dictionary not created (will create it now)")
size = getDirSize(job.workdir)
            # Store the measured disk space (the max value will later be sent with the job metrics);
            # use a separate variable so the log creation status flag is not clobbered
            _status = storeWorkDirSize(size, self.__env['pilot_initdir'], job)
# input and output files should already be removed from the workdir in child process
tarballNM = "%s.tar" % (job.newDirNM)
        # os.system() does not raise OSError on a failing command; check its return status instead
        cmd = "mv %s %s" % (job.workdir, job.newDirNM)
        tolog("Executing command: %s" % (cmd))
        if os.system(cmd) != 0:
            tolog("!!WARNING!!1400!! Could not move job workdir %s to %s" % (job.workdir, job.newDirNM))
else:
timeout = 55*60
            # append "echo $?" to mask the tar exit code for now; otherwise tar can return an error code
            # we do not want to act on, e.g. in evgen jobs with broken soft links (the pilot should remove
            # such links beforehand). Note that timedCommand() therefore sees the exit code of echo, so a
            # tar failure is only visible in the output. Later, the pilot should fail if the log file is too big
cmd = "pwd;tar cvf %s %s --dereference; echo $?" % (tarballNM, job.newDirNM)
exitcode, output = timedCommand(cmd, timeout=timeout)
if exitcode != 0:
tolog("!!WARNING!!4343!! Log file creation failed: %d, %s" % (exitcode, output))
else:
tolog("Tarball created: %s" % (tarballNM))
cmd = "gzip -f %s" % (tarballNM)
exitcode, output = timedCommand(cmd, timeout=timeout)
if exitcode != 0:
tolog("!!WARNING!!4343!! Log file zip failed: %d, %s" % (exitcode, output))
else:
try:
os.rename("%s.gz" % (tarballNM), job.logFile)
#command = "cp %s ../" % job.logFile
#os.system(command)
except OSError:
tolog("!!WARNING!!1400!! Could not rename gzipped tarball %s" % job.logFile)
else:
tolog("Tarball renamed to %s" % (job.logFile))
status = True
return status
def removeLockFile(self, workdir, lockfile="LOCKFILE"):
""" Removal of temporary lock file after successful log registration """
# try to remove the lock file
# do not bother if the site doesn't allow for job recovery
f = "%s/%s" % (workdir, lockfile)
if self.__env['jobrec']:
try:
os.remove(f)
except Exception, e:
tolog("!!WARNING!!1000!! Failed to remove work dir lock file: %s" % str(e))
else:
if lockfile == "LOCKFILE":
tolog("Lock file removed (job work dir will be removed)")
else:
tolog("Lock file removed: %s" % (f))
def constructPathFromJobid(self, jobId):
""" Split a jobId into sub directories """
# Used by getLogPath() to generate subdirectories from a job id for the full path for PanDA job logs (centrally stored)
# E.g. 1838566890 -> 18/38/56/68/90
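        # an odd-length jobId keeps its trailing digit as its own sub directory, e.g. 123456789 -> 12/34/56/78/9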
# Handle odd length strings
if len(jobId)%2 != 0:
# save the last character and add it separately below
last_char = jobId[-1]
else:
last_char = ""
# Create a list of sub directories, e.g. ['18', '38', '56', '68', '90']
        sub_dirs = re.findall('(\d\d)', jobId)
        # re.findall() returns a list, so test it for emptiness rather than comparing with a string
        if sub_dirs:
            # Add the last char to the list, if any
            if last_char != "":
                sub_dirs.append(last_char)
            # '18/38/56/68/90'
            path = '/'.join(sub_dirs)
        else:
            tolog("!!WARNING!!4444!! Sub directories could not be created for jobId=%s" % (jobId))
            # fall back to an empty path so the caller does not hit an unbound variable
            path = ""
        return path
def getLogPath(self, jobId, logFile, experiment, primary=True):
""" Get the standard path for PanDA job logs """
# This path determines where the log will be transferred to.
# The host and base path is read from schedconfig, and can be a ,-separated list.
# If the "primary"-boolean is True, the first location will be selected. False means the second location, if any
        # In case only one host is defined in schedconfig.logPath, primary=False yields an empty log path
# In case of objectstores, also the os_bucket_id will be returned (otherwise set to -1)
# Standard path
# logPaths = readpar('copytoollogPath')
# logPaths = "root://eos.cern.ch/atlas/logs,dav://bnldav.cern.ch/atlas/logs"
# logPaths = "root://eosatlas.cern.ch/atlas/logs"
os_bucket_id = -1
# Get the site information object
si = getSiteInformation(experiment)
default_ddmendpoint = si.getObjectstoreDDMEndpoint(os_bucket_name='logs')
logPaths = si.getObjectstorePath(ddmendpoint=default_ddmendpoint, label='w')
os_bucket_id = si.getObjectstoreBucketID(default_ddmendpoint)
# Handle multiple paths (primary and secondary log paths)
if "," in logPaths:
_logPaths = logPaths.split(",")
if primary:
_logPath = _logPaths[0]
else:
_logPath = _logPaths[1]
else:
if primary:
_logPath = logPaths
else:
tolog("No secondary log path is defined")
_logPath = ""
# Create the full path
if _logPath != "":
# Use the job id to generate sub directories
# path = self.constructPathFromJobid(jobId)
# Put it all together
logPath = os.path.join(_logPath, logFile)
# logPath = os.path.join(_logPath, os.path.join(jobId, logFile))
else:
logPath = ""
return logPath, os_bucket_id
|
{
"content_hash": "b4ff59a377b264be1be30f0724737de7",
"timestamp": "",
"source": "github",
"line_count": 1377,
"max_line_length": 229,
"avg_line_length": 48.620188816267245,
"alnum_prop": 0.5563256161314414,
"repo_name": "mlassnig/pilot",
"id": "84ef3a36e41fe657485e9dca0f004ab0983bf1a3",
"size": "66950",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "JobLog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4965320"
},
{
"name": "Shell",
"bytes": "23530"
}
],
"symlink_target": ""
}
|
"""Example implementation of code to run on the Cloud ML service.
"""
import traceback
import argparse
import json
import os
from . import model
import shutil
import tensorflow as tf
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Input Arguments
parser.add_argument(
'--train_data_paths',
help = 'GCS or local path to training data',
required = True
)
parser.add_argument(
'--eval_data_paths',
help = 'GCS or local path to evaluation data',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--keras',
help = 'Use the Keras variant',
action = 'store_true'
)
# Eval arguments
parser.add_argument(
'--eval_delay_secs',
help = 'How long to wait before running first evaluation',
default = 10,
type = int
)
parser.add_argument(
'--min_eval_frequency',
help = 'Minimum number of training steps between evaluations',
default = 1,
type = int
)
args = parser.parse_args()
arguments = args.__dict__
# Unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
output_dir = arguments.pop('output_dir')
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
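    # e.g. TF_CONFIG='{"task": {"trial": "3"}}' appends the trial id: <output_dir>/3; without a trial id only a trailing separator is added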
# Run the training job
try:
shutil.rmtree(output_dir, ignore_errors = True) # start fresh each time
model.train_and_evaluate(output_dir, arguments['keras'])
except:
traceback.print_exc()
|
{
"content_hash": "c7d446be17ccc6e9420b449cf4bcfc94",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 25.9625,
"alnum_prop": 0.5907558979297063,
"repo_name": "GoogleCloudPlatform/training-data-analyst",
"id": "199b7eec75f22747e185e703ccb06342e685f899",
"size": "2674",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "courses/machine_learning/deepdive/05_artandscience/simplernn/trainer/task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39536"
},
{
"name": "C#",
"bytes": "23445"
},
{
"name": "C++",
"bytes": "30926"
},
{
"name": "CSS",
"bytes": "53087"
},
{
"name": "Dockerfile",
"bytes": "90856"
},
{
"name": "Go",
"bytes": "93755"
},
{
"name": "HCL",
"bytes": "73891"
},
{
"name": "HTML",
"bytes": "2342167"
},
{
"name": "Java",
"bytes": "2441030"
},
{
"name": "JavaScript",
"bytes": "3957504"
},
{
"name": "Jinja",
"bytes": "257585"
},
{
"name": "Jsonnet",
"bytes": "5696"
},
{
"name": "Jupyter Notebook",
"bytes": "242016061"
},
{
"name": "Makefile",
"bytes": "12642"
},
{
"name": "PigLatin",
"bytes": "11558"
},
{
"name": "Pug",
"bytes": "457977"
},
{
"name": "Python",
"bytes": "18543833"
},
{
"name": "R",
"bytes": "68"
},
{
"name": "Scala",
"bytes": "27161"
},
{
"name": "Shell",
"bytes": "763259"
},
{
"name": "TypeScript",
"bytes": "66858"
}
],
"symlink_target": ""
}
|
"""The tests for the logbook component."""
# pylint: disable=protected-access,invalid-name
from datetime import datetime, timedelta
import logging
import unittest
import pytest
import voluptuous as vol
from homeassistant.components import logbook, recorder, sun
from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME
from homeassistant.components.homekit.const import (
ATTR_DISPLAY_NAME,
ATTR_VALUE,
DOMAIN as DOMAIN_HOMEKIT,
EVENT_HOMEKIT_CHANGED,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_HIDDEN,
ATTR_NAME,
ATTR_SERVICE,
EVENT_AUTOMATION_TRIGGERED,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
EVENT_SCRIPT_STARTED,
EVENT_STATE_CHANGED,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component, setup_component
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant, init_recorder_component
_LOGGER = logging.getLogger(__name__)
class TestComponentLogbook(unittest.TestCase):
"""Test the History component."""
EMPTY_CONFIG = logbook.CONFIG_SCHEMA({logbook.DOMAIN: {}})
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
init_recorder_component(self.hass) # Force an in memory DB
assert setup_component(self.hass, logbook.DOMAIN, self.EMPTY_CONFIG)
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_service_call_create_logbook_entry(self):
"""Test if service call create log book entry."""
calls = []
@ha.callback
def event_listener(event):
"""Append on event."""
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
self.hass.services.call(
logbook.DOMAIN,
"log",
{
logbook.ATTR_NAME: "Alarm",
logbook.ATTR_MESSAGE: "is triggered",
logbook.ATTR_DOMAIN: "switch",
logbook.ATTR_ENTITY_ID: "switch.test_switch",
},
True,
)
# Logbook entry service call results in firing an event.
# Our service call will unblock when the event listeners have been
# scheduled. This means that they may not have been processed yet.
self.hass.block_till_done()
self.hass.data[recorder.DATA_INSTANCE].block_till_done()
events = list(
logbook._get_events(
self.hass,
{},
dt_util.utcnow() - timedelta(hours=1),
dt_util.utcnow() + timedelta(hours=1),
)
)
assert len(events) == 1
assert 1 == len(calls)
last_call = calls[-1]
assert "Alarm" == last_call.data.get(logbook.ATTR_NAME)
assert "is triggered" == last_call.data.get(logbook.ATTR_MESSAGE)
assert "switch" == last_call.data.get(logbook.ATTR_DOMAIN)
assert "switch.test_switch" == last_call.data.get(logbook.ATTR_ENTITY_ID)
def test_service_call_create_log_book_entry_no_message(self):
"""Test if service call create log book entry without message."""
calls = []
@ha.callback
def event_listener(event):
"""Append on event."""
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
with pytest.raises(vol.Invalid):
self.hass.services.call(logbook.DOMAIN, "log", {}, True)
# Logbook entry service call results in firing an event.
# Our service call will unblock when the event listeners have been
# scheduled. This means that they may not have been processed yet.
self.hass.block_till_done()
assert 0 == len(calls)
def test_humanify_filter_sensor(self):
"""Test humanify filter too frequent sensor values."""
entity_id = "sensor.bla"
pointA = dt_util.utcnow().replace(minute=2)
pointB = pointA.replace(minute=5)
pointC = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
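        # pointA and pointB fall into the same GROUP_BY_MINUTES window, so humanify() keeps only the latest of the two; pointC starts a new window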
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id, 20)
eventC = self.create_state_changed_event(pointC, entity_id, 30)
entries = list(logbook.humanify(self.hass, (eventA, eventB, eventC)))
assert 2 == len(entries)
self.assert_entry(
entries[0], pointB, "bla", domain="sensor", entity_id=entity_id
)
self.assert_entry(
entries[1], pointC, "bla", domain="sensor", entity_id=entity_id
)
def test_filter_continuous_sensor_values(self):
"""Test remove continuous sensor events from logbook."""
entity_id = "sensor.bla"
pointA = dt_util.utcnow()
attributes = {"unit_of_measurement": "foo"}
eventA = self.create_state_changed_event(pointA, entity_id, 10, attributes)
entries = list(logbook.humanify(self.hass, (eventA,)))
assert 0 == len(entries)
def test_exclude_new_entities(self):
"""Test if events are excluded on first update."""
entity_id = "sensor.bla"
entity_id2 = "sensor.blu"
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
eventA.data["old_state"] = None
entities_filter = logbook._generate_filter_from_config({})
events = [
e
for e in (ha.Event(EVENT_HOMEASSISTANT_STOP), eventA, eventB)
if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 2 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="stopped", domain=ha.DOMAIN
)
self.assert_entry(
entries[1], pointB, "blu", domain="sensor", entity_id=entity_id2
)
def test_exclude_removed_entities(self):
"""Test if events are excluded on last update."""
entity_id = "sensor.bla"
entity_id2 = "sensor.blu"
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
eventA.data["new_state"] = None
entities_filter = logbook._generate_filter_from_config({})
events = [
e
for e in (ha.Event(EVENT_HOMEASSISTANT_STOP), eventA, eventB)
if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 2 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="stopped", domain=ha.DOMAIN
)
self.assert_entry(
entries[1], pointB, "blu", domain="sensor", entity_id=entity_id2
)
def test_exclude_events_hidden(self):
"""Test if events are excluded if entity is hidden."""
entity_id = "sensor.bla"
entity_id2 = "sensor.blu"
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(
pointA, entity_id, 10, {ATTR_HIDDEN: "true"}
)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
entities_filter = logbook._generate_filter_from_config({})
events = [
e
for e in (ha.Event(EVENT_HOMEASSISTANT_STOP), eventA, eventB)
if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 2 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="stopped", domain=ha.DOMAIN
)
self.assert_entry(
entries[1], pointB, "blu", domain="sensor", entity_id=entity_id2
)
def test_exclude_events_entity(self):
"""Test if events are filtered if entity is excluded in config."""
entity_id = "sensor.bla"
entity_id2 = "sensor.blu"
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
logbook.CONF_EXCLUDE: {logbook.CONF_ENTITIES: [entity_id]}
},
}
)
entities_filter = logbook._generate_filter_from_config(config[logbook.DOMAIN])
events = [
e
for e in (ha.Event(EVENT_HOMEASSISTANT_STOP), eventA, eventB)
if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 2 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="stopped", domain=ha.DOMAIN
)
self.assert_entry(
entries[1], pointB, "blu", domain="sensor", entity_id=entity_id2
)
def test_exclude_events_domain(self):
"""Test if events are filtered if domain is excluded in config."""
entity_id = "switch.bla"
entity_id2 = "sensor.blu"
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
logbook.CONF_EXCLUDE: {
logbook.CONF_DOMAINS: ["switch", "alexa", DOMAIN_HOMEKIT]
}
},
}
)
entities_filter = logbook._generate_filter_from_config(config[logbook.DOMAIN])
events = [
e
for e in (
ha.Event(EVENT_HOMEASSISTANT_START),
ha.Event(EVENT_ALEXA_SMART_HOME),
ha.Event(EVENT_HOMEKIT_CHANGED),
eventA,
eventB,
)
if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 2 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
self.assert_entry(
entries[1], pointB, "blu", domain="sensor", entity_id=entity_id2
)
def test_exclude_automation_events(self):
"""Test if automation entries can be excluded by entity_id."""
name = "My Automation Rule"
domain = "automation"
entity_id = "automation.my_automation_rule"
entity_id2 = "automation.my_automation_rule_2"
entity_id2 = "sensor.blu"
eventA = ha.Event(
logbook.EVENT_AUTOMATION_TRIGGERED,
{logbook.ATTR_NAME: name, logbook.ATTR_ENTITY_ID: entity_id},
)
eventB = ha.Event(
logbook.EVENT_AUTOMATION_TRIGGERED,
{logbook.ATTR_NAME: name, logbook.ATTR_ENTITY_ID: entity_id2},
)
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
logbook.CONF_EXCLUDE: {logbook.CONF_ENTITIES: [entity_id]}
},
}
)
entities_filter = logbook._generate_filter_from_config(config[logbook.DOMAIN])
events = [
e
for e in (ha.Event(EVENT_HOMEASSISTANT_STOP), eventA, eventB)
if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 2 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="stopped", domain=ha.DOMAIN
)
self.assert_entry(entries[1], name=name, domain=domain, entity_id=entity_id2)
def test_exclude_script_events(self):
"""Test if script start can be excluded by entity_id."""
name = "My Script Rule"
domain = "script"
entity_id = "script.my_script"
entity_id2 = "script.my_script_2"
entity_id2 = "sensor.blu"
eventA = ha.Event(
logbook.EVENT_SCRIPT_STARTED,
{logbook.ATTR_NAME: name, logbook.ATTR_ENTITY_ID: entity_id},
)
eventB = ha.Event(
logbook.EVENT_SCRIPT_STARTED,
{logbook.ATTR_NAME: name, logbook.ATTR_ENTITY_ID: entity_id2},
)
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
logbook.CONF_EXCLUDE: {logbook.CONF_ENTITIES: [entity_id]}
},
}
)
entities_filter = logbook._generate_filter_from_config(config[logbook.DOMAIN])
events = [
e
for e in (ha.Event(EVENT_HOMEASSISTANT_STOP), eventA, eventB)
if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 2 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="stopped", domain=ha.DOMAIN
)
self.assert_entry(entries[1], name=name, domain=domain, entity_id=entity_id2)
def test_include_events_entity(self):
"""Test if events are filtered if entity is included in config."""
entity_id = "sensor.bla"
entity_id2 = "sensor.blu"
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
logbook.CONF_INCLUDE: {logbook.CONF_ENTITIES: [entity_id2]}
},
}
)
entities_filter = logbook._generate_filter_from_config(config[logbook.DOMAIN])
events = [
e
for e in (ha.Event(EVENT_HOMEASSISTANT_STOP), eventA, eventB)
if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 2 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="stopped", domain=ha.DOMAIN
)
self.assert_entry(
entries[1], pointB, "blu", domain="sensor", entity_id=entity_id2
)
def test_include_events_domain(self):
"""Test if events are filtered if domain is included in config."""
entity_id = "switch.bla"
entity_id2 = "sensor.blu"
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
event_alexa = ha.Event(
EVENT_ALEXA_SMART_HOME,
{"request": {"namespace": "Alexa.Discovery", "name": "Discover"}},
)
event_homekit = ha.Event(
EVENT_HOMEKIT_CHANGED,
{
ATTR_ENTITY_ID: "lock.front_door",
ATTR_DISPLAY_NAME: "Front Door",
ATTR_SERVICE: "lock",
},
)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
logbook.CONF_INCLUDE: {
logbook.CONF_DOMAINS: ["sensor", "alexa", DOMAIN_HOMEKIT]
}
},
}
)
entities_filter = logbook._generate_filter_from_config(config[logbook.DOMAIN])
events = [
e
for e in (
ha.Event(EVENT_HOMEASSISTANT_START),
event_alexa,
event_homekit,
eventA,
eventB,
)
if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 4 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
self.assert_entry(entries[1], name="Amazon Alexa", domain="alexa")
self.assert_entry(entries[2], name="HomeKit", domain=DOMAIN_HOMEKIT)
self.assert_entry(
entries[3], pointB, "blu", domain="sensor", entity_id=entity_id2
)
def test_include_exclude_events(self):
"""Test if events are filtered if include and exclude is configured."""
entity_id = "switch.bla"
entity_id2 = "sensor.blu"
entity_id3 = "sensor.bli"
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA1 = self.create_state_changed_event(pointA, entity_id, 10)
eventA2 = self.create_state_changed_event(pointA, entity_id2, 10)
eventA3 = self.create_state_changed_event(pointA, entity_id3, 10)
eventB1 = self.create_state_changed_event(pointB, entity_id, 20)
eventB2 = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
logbook.CONF_INCLUDE: {
logbook.CONF_DOMAINS: ["sensor"],
logbook.CONF_ENTITIES: ["switch.bla"],
},
logbook.CONF_EXCLUDE: {
logbook.CONF_DOMAINS: ["switch"],
logbook.CONF_ENTITIES: ["sensor.bli"],
},
},
}
)
entities_filter = logbook._generate_filter_from_config(config[logbook.DOMAIN])
events = [
e
for e in (
ha.Event(EVENT_HOMEASSISTANT_START),
eventA1,
eventA2,
eventA3,
eventB1,
eventB2,
)
if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 5 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
self.assert_entry(
entries[1], pointA, "bla", domain="switch", entity_id=entity_id
)
self.assert_entry(
entries[2], pointA, "blu", domain="sensor", entity_id=entity_id2
)
self.assert_entry(
entries[3], pointB, "bla", domain="switch", entity_id=entity_id
)
self.assert_entry(
entries[4], pointB, "blu", domain="sensor", entity_id=entity_id2
)
def test_exclude_auto_groups(self):
"""Test if events of automatically generated groups are filtered."""
entity_id = "switch.bla"
entity_id2 = "group.switches"
pointA = dt_util.utcnow()
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointA, entity_id2, 20, {"auto": True})
entities_filter = logbook._generate_filter_from_config({})
events = [
e for e in (eventA, eventB) if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 1 == len(entries)
self.assert_entry(
entries[0], pointA, "bla", domain="switch", entity_id=entity_id
)
def test_exclude_attribute_changes(self):
"""Test if events of attribute changes are filtered."""
entity_id = "switch.bla"
entity_id2 = "switch.blu"
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=1)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(
pointA, entity_id2, 20, last_changed=pointA, last_updated=pointB
)
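        # eventB has last_changed != last_updated, i.e. only its attributes changed, so it is filtered out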
entities_filter = logbook._generate_filter_from_config({})
events = [
e for e in (eventA, eventB) if logbook._keep_event(e, entities_filter)
]
entries = list(logbook.humanify(self.hass, events))
assert 1 == len(entries)
self.assert_entry(
entries[0], pointA, "bla", domain="switch", entity_id=entity_id
)
def test_home_assistant_start_stop_grouped(self):
"""Test if HA start and stop events are grouped.
        Events that occur in the same minute are combined into a single restart entry.
"""
entries = list(
logbook.humanify(
self.hass,
(
ha.Event(EVENT_HOMEASSISTANT_STOP),
ha.Event(EVENT_HOMEASSISTANT_START),
),
)
)
assert 1 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="restarted", domain=ha.DOMAIN
)
def test_home_assistant_start(self):
"""Test if HA start is not filtered or converted into a restart."""
entity_id = "switch.bla"
pointA = dt_util.utcnow()
entries = list(
logbook.humanify(
self.hass,
(
ha.Event(EVENT_HOMEASSISTANT_START),
self.create_state_changed_event(pointA, entity_id, 10),
),
)
)
assert 2 == len(entries)
self.assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
self.assert_entry(
entries[1], pointA, "bla", domain="switch", entity_id=entity_id
)
def test_entry_message_from_state_device(self):
"""Test if logbook message is correctly created for switches.
        In particular, test the special handling of turn on/off events.
"""
pointA = dt_util.utcnow()
# message for a device state change
eventA = self.create_state_changed_event(pointA, "switch.bla", 10)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "changed to 10" == message
# message for a switch turned on
eventA = self.create_state_changed_event(pointA, "switch.bla", STATE_ON)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "turned on" == message
# message for a switch turned off
eventA = self.create_state_changed_event(pointA, "switch.bla", STATE_OFF)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "turned off" == message
def test_entry_message_from_state_device_tracker(self):
"""Test if logbook message is correctly created for device tracker."""
pointA = dt_util.utcnow()
# message for a device tracker "not home" state
eventA = self.create_state_changed_event(
pointA, "device_tracker.john", STATE_NOT_HOME
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is away" == message
# message for a device tracker "home" state
eventA = self.create_state_changed_event(pointA, "device_tracker.john", "work")
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is at work" == message
def test_entry_message_from_state_person(self):
"""Test if logbook message is correctly created for a person."""
pointA = dt_util.utcnow()
# message for a device tracker "not home" state
eventA = self.create_state_changed_event(pointA, "person.john", STATE_NOT_HOME)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is away" == message
# message for a device tracker "home" state
eventA = self.create_state_changed_event(pointA, "person.john", "work")
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is at work" == message
def test_entry_message_from_state_sun(self):
"""Test if logbook message is correctly created for sun."""
pointA = dt_util.utcnow()
# message for a sun rise
eventA = self.create_state_changed_event(
pointA, "sun.sun", sun.STATE_ABOVE_HORIZON
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "has risen" == message
# message for a sun set
eventA = self.create_state_changed_event(
pointA, "sun.sun", sun.STATE_BELOW_HORIZON
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "has set" == message
def test_entry_message_from_state_binary_sensor_battery(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "battery"}
# message for a binary_sensor battery "low" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.battery", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is low" == message
# message for a binary_sensor battery "normal" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.battery", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is normal" == message
def test_entry_message_from_state_binary_sensor_connectivity(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "connectivity"}
# message for a binary_sensor connectivity "connected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.connectivity", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is connected" == message
# message for a binary_sensor connectivity "disconnected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.connectivity", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is disconnected" == message
def test_entry_message_from_state_binary_sensor_door(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "door"}
# message for a binary_sensor door "open" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.door", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is opened" == message
# message for a binary_sensor door "closed" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.door", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is closed" == message
def test_entry_message_from_state_binary_sensor_garage_door(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "garage_door"}
# message for a binary_sensor garage_door "open" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.garage_door", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is opened" == message
# message for a binary_sensor garage_door "closed" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.garage_door", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is closed" == message
def test_entry_message_from_state_binary_sensor_opening(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "opening"}
# message for a binary_sensor opening "open" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.opening", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is opened" == message
# message for a binary_sensor opening "closed" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.opening", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is closed" == message
def test_entry_message_from_state_binary_sensor_window(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "window"}
# message for a binary_sensor window "open" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.window", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is opened" == message
# message for a binary_sensor window "closed" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.window", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is closed" == message
def test_entry_message_from_state_binary_sensor_lock(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "lock"}
# message for a binary_sensor lock "unlocked" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.lock", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is unlocked" == message
# message for a binary_sensor lock "locked" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.lock", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is locked" == message
def test_entry_message_from_state_binary_sensor_plug(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "plug"}
# message for a binary_sensor plug "unpluged" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.plug", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is plugged in" == message
# message for a binary_sensor plug "pluged" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.plug", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is unplugged" == message
def test_entry_message_from_state_binary_sensor_presence(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "presence"}
# message for a binary_sensor presence "home" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.presence", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is at home" == message
# message for a binary_sensor presence "away" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.presence", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is away" == message
def test_entry_message_from_state_binary_sensor_safety(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "safety"}
# message for a binary_sensor safety "unsafe" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.safety", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is unsafe" == message
# message for a binary_sensor safety "safe" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.safety", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "is safe" == message
def test_entry_message_from_state_binary_sensor_cold(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "cold"}
# message for a binary_sensor cold "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.cold", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected cold" == message
# message for a binary_sensor cold "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.cold", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no cold detected)" == message
def test_entry_message_from_state_binary_sensor_gas(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "gas"}
# message for a binary_sensor gas "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.gas", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected gas" == message
# message for a binary_sensor gas "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.gas", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no gas detected)" == message
def test_entry_message_from_state_binary_sensor_heat(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "heat"}
# message for a binary_sensor heat "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.heat", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected heat" == message
# message for a binary_sensor heat "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.heat", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no heat detected)" == message
def test_entry_message_from_state_binary_sensor_light(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "light"}
# message for a binary_sensor light "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.light", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected light" == message
# message for a binary_sensor light "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.light", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no light detected)" == message
def test_entry_message_from_state_binary_sensor_moisture(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "moisture"}
# message for a binary_sensor moisture "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.moisture", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected moisture" == message
# message for a binary_sensor moisture "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.moisture", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no moisture detected)" == message
def test_entry_message_from_state_binary_sensor_motion(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "motion"}
# message for a binary_sensor motion "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.motion", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected motion" == message
# message for a binary_sensor motion "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.motion", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no motion detected)" == message
def test_entry_message_from_state_binary_sensor_occupancy(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "occupancy"}
# message for a binary_sensor occupancy "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.occupancy", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected occupancy" == message
# message for a binary_sensor occupancy "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.occupancy", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no occupancy detected)" == message
def test_entry_message_from_state_binary_sensor_power(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "power"}
# message for a binary_sensor power "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.power", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected power" == message
# message for a binary_sensor power "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.power", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no power detected)" == message
def test_entry_message_from_state_binary_sensor_problem(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "problem"}
# message for a binary_sensor problem "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.problem", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected problem" == message
# message for a binary_sensor problem "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.problem", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no problem detected)" == message
def test_entry_message_from_state_binary_sensor_smoke(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "smoke"}
# message for a binary_sensor smoke "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.smoke", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected smoke" == message
# message for a binary_sensor smoke "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.smoke", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no smoke detected)" == message
def test_entry_message_from_state_binary_sensor_sound(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "sound"}
# message for a binary_sensor sound "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.sound", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected sound" == message
# message for a binary_sensor sound "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.sound", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no sound detected)" == message
def test_entry_message_from_state_binary_sensor_vibration(self):
"""Test if logbook message is correctly created for a binary_sensor."""
pointA = dt_util.utcnow()
attributes = {"device_class": "vibration"}
# message for a binary_sensor vibration "detected" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.vibration", STATE_ON, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "detected vibration" == message
# message for a binary_sensor vibration "cleared" state
eventA = self.create_state_changed_event(
pointA, "binary_sensor.vibration", STATE_OFF, attributes
)
to_state = ha.State.from_dict(eventA.data.get("new_state"))
message = logbook._entry_message_from_state(to_state.domain, to_state)
assert "cleared (no vibration detected)" == message
def test_process_custom_logbook_entries(self):
"""Test if custom log book entries get added as an entry."""
name = "Nice name"
message = "has a custom entry"
entity_id = "sun.sun"
entries = list(
logbook.humanify(
self.hass,
(
ha.Event(
logbook.EVENT_LOGBOOK_ENTRY,
{
logbook.ATTR_NAME: name,
logbook.ATTR_MESSAGE: message,
logbook.ATTR_ENTITY_ID: entity_id,
},
),
),
)
)
assert 1 == len(entries)
self.assert_entry(
entries[0], name=name, message=message, domain="sun", entity_id=entity_id
)
def assert_entry(
self, entry, when=None, name=None, message=None, domain=None, entity_id=None
):
"""Assert an entry is what is expected."""
if when:
assert when == entry["when"]
if name:
assert name == entry["name"]
if message:
assert message == entry["message"]
if domain:
assert domain == entry["domain"]
if entity_id:
assert entity_id == entry["entity_id"]
def create_state_changed_event(
self,
event_time_fired,
entity_id,
state,
attributes=None,
last_changed=None,
last_updated=None,
):
"""Create state changed event."""
# Logbook only cares about state change events that contain
# an old state, but it does not act on the old state's contents.
state = ha.State(
entity_id, state, attributes, last_changed, last_updated
).as_dict()
return ha.Event(
EVENT_STATE_CHANGED,
{"entity_id": entity_id, "old_state": state, "new_state": state},
time_fired=event_time_fired,
)
async def test_logbook_view(hass, hass_client):
"""Test the logbook view."""
await hass.async_add_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_add_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get("/api/logbook/{}".format(dt_util.utcnow().isoformat()))
assert response.status == 200
async def test_logbook_view_period_entity(hass, hass_client):
"""Test the logbook view with period and entity."""
await hass.async_add_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_add_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
entity_id_test = "switch.test"
hass.states.async_set(entity_id_test, STATE_OFF)
hass.states.async_set(entity_id_test, STATE_ON)
entity_id_second = "switch.second"
hass.states.async_set(entity_id_second, STATE_OFF)
hass.states.async_set(entity_id_second, STATE_ON)
await hass.async_block_till_done()
await hass.async_add_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries without filters
response = await client.get("/api/logbook/{}".format(start_date.isoformat()))
assert response.status == 200
json = await response.json()
assert len(json) == 2
assert json[0]["entity_id"] == entity_id_test
assert json[1]["entity_id"] == entity_id_second
# Test today entries with filter by period
response = await client.get(
"/api/logbook/{}?period=1".format(start_date.isoformat())
)
assert response.status == 200
json = await response.json()
assert len(json) == 2
assert json[0]["entity_id"] == entity_id_test
assert json[1]["entity_id"] == entity_id_second
# Test today entries with filter by entity_id
response = await client.get(
"/api/logbook/{}?entity=switch.test".format(start_date.isoformat())
)
assert response.status == 200
json = await response.json()
assert len(json) == 1
assert json[0]["entity_id"] == entity_id_test
# Test entries for 3 days with filter by entity_id
response = await client.get(
"/api/logbook/{}?period=3&entity=switch.test".format(start_date.isoformat())
)
assert response.status == 200
json = await response.json()
assert len(json) == 1
assert json[0]["entity_id"] == entity_id_test
# Tomorrow time 00:00:00
start = (dt_util.utcnow() + timedelta(days=1)).date()
start_date = datetime(start.year, start.month, start.day)
# Test tomorrow entries without filters
response = await client.get("/api/logbook/{}".format(start_date.isoformat()))
assert response.status == 200
json = await response.json()
assert len(json) == 0
# Test tomorrow entries with filter by entity_id
response = await client.get(
"/api/logbook/{}?entity=switch.test".format(start_date.isoformat())
)
assert response.status == 200
json = await response.json()
assert len(json) == 0
# Test a 3-day window ending tomorrow (so including today) with filter by entity_id
response = await client.get(
"/api/logbook/{}?period=3&entity=switch.test".format(start_date.isoformat())
)
assert response.status == 200
json = await response.json()
assert len(json) == 1
assert json[0]["entity_id"] == entity_id_test
async def test_humanify_alexa_event(hass):
"""Test humanifying Alexa event."""
hass.states.async_set("light.kitchen", "on", {"friendly_name": "Kitchen Light"})
results = list(
logbook.humanify(
hass,
[
ha.Event(
EVENT_ALEXA_SMART_HOME,
{"request": {"namespace": "Alexa.Discovery", "name": "Discover"}},
),
ha.Event(
EVENT_ALEXA_SMART_HOME,
{
"request": {
"namespace": "Alexa.PowerController",
"name": "TurnOn",
"entity_id": "light.kitchen",
}
},
),
ha.Event(
EVENT_ALEXA_SMART_HOME,
{
"request": {
"namespace": "Alexa.PowerController",
"name": "TurnOn",
"entity_id": "light.non_existing",
}
},
),
],
)
)
event1, event2, event3 = results
assert event1["name"] == "Amazon Alexa"
assert event1["message"] == "send command Alexa.Discovery/Discover"
assert event1["entity_id"] is None
assert event2["name"] == "Amazon Alexa"
assert (
event2["message"]
== "send command Alexa.PowerController/TurnOn for Kitchen Light"
)
assert event2["entity_id"] == "light.kitchen"
assert event3["name"] == "Amazon Alexa"
assert (
event3["message"]
== "send command Alexa.PowerController/TurnOn for light.non_existing"
)
assert event3["entity_id"] == "light.non_existing"
async def test_humanify_homekit_changed_event(hass):
"""Test humanifying HomeKit changed event."""
event1, event2 = list(
logbook.humanify(
hass,
[
ha.Event(
EVENT_HOMEKIT_CHANGED,
{
ATTR_ENTITY_ID: "lock.front_door",
ATTR_DISPLAY_NAME: "Front Door",
ATTR_SERVICE: "lock",
},
),
ha.Event(
EVENT_HOMEKIT_CHANGED,
{
ATTR_ENTITY_ID: "cover.window",
ATTR_DISPLAY_NAME: "Window",
ATTR_SERVICE: "set_cover_position",
ATTR_VALUE: 75,
},
),
],
)
)
assert event1["name"] == "HomeKit"
assert event1["domain"] == DOMAIN_HOMEKIT
assert event1["message"] == "send command lock for Front Door"
assert event1["entity_id"] == "lock.front_door"
assert event2["name"] == "HomeKit"
assert event2["domain"] == DOMAIN_HOMEKIT
assert event2["message"] == "send command set_cover_position to 75 for Window"
assert event2["entity_id"] == "cover.window"
async def test_humanify_automation_triggered_event(hass):
"""Test humanifying Automation Trigger event."""
event1, event2 = list(
logbook.humanify(
hass,
[
ha.Event(
EVENT_AUTOMATION_TRIGGERED,
{ATTR_ENTITY_ID: "automation.hello", ATTR_NAME: "Hello Automation"},
),
ha.Event(
EVENT_AUTOMATION_TRIGGERED,
{ATTR_ENTITY_ID: "automation.bye", ATTR_NAME: "Bye Automation"},
),
],
)
)
assert event1["name"] == "Hello Automation"
assert event1["domain"] == "automation"
assert event1["message"] == "has been triggered"
assert event1["entity_id"] == "automation.hello"
assert event2["name"] == "Bye Automation"
assert event2["domain"] == "automation"
assert event2["message"] == "has been triggered"
assert event2["entity_id"] == "automation.bye"
async def test_humanify_script_started_event(hass):
"""Test humanifying Script Run event."""
event1, event2 = list(
logbook.humanify(
hass,
[
ha.Event(
EVENT_SCRIPT_STARTED,
{ATTR_ENTITY_ID: "script.hello", ATTR_NAME: "Hello Script"},
),
ha.Event(
EVENT_SCRIPT_STARTED,
{ATTR_ENTITY_ID: "script.bye", ATTR_NAME: "Bye Script"},
),
],
)
)
assert event1["name"] == "Hello Script"
assert event1["domain"] == "script"
assert event1["message"] == "started"
assert event1["entity_id"] == "script.hello"
assert event2["name"] == "Bye Script"
assert event2["domain"] == "script"
assert event2["message"] == "started"
assert event2["entity_id"] == "script.bye"
|
{
"content_hash": "390caf2eb42caf8a4bb149ab10e50ddb",
"timestamp": "",
"source": "github",
"line_count": 1486,
"max_line_length": 88,
"avg_line_length": 39.331763122476445,
"alnum_prop": 0.5929816757061953,
"repo_name": "Teagan42/home-assistant",
"id": "70e769a54f2d58341c645be8015147397e929f77",
"size": "58447",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/logbook/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
"""
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.template import loader, RequestContext
from django.http import HttpResponse, Http404
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.core import urlresolvers
def render_to_response(*args, **kwargs):
"""
Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
httpresponse_kwargs = {'content_type': kwargs.pop('mimetype', None)}
return HttpResponse(loader.render_to_string(*args, **kwargs), **httpresponse_kwargs)
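# A minimal usage sketch (the view, template name, and context are
# illustrative assumptions, not part of this module):
#
#     def article_index(request):
#         return render_to_response('articles/index.html', {'latest': []})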
def render(request, *args, **kwargs):
"""
Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
httpresponse_kwargs = {
'content_type': kwargs.pop('content_type', None),
'status': kwargs.pop('status', None),
}
if 'context_instance' in kwargs:
context_instance = kwargs.pop('context_instance')
if kwargs.get('current_app', None):
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
current_app = kwargs.pop('current_app', None)
context_instance = RequestContext(request, current_app=current_app)
kwargs['context_instance'] = context_instance
return HttpResponse(loader.render_to_string(*args, **kwargs),
**httpresponse_kwargs)
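# Usage sketch (the view, template name, context, and status are
# illustrative):
#
#     def article_index(request):
#         return render(request, 'articles/index.html', {'latest': []},
#                       status=200)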
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default this issues a temporary redirect; pass permanent=True to issue
a permanent redirect.
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
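# Usage sketches covering the three argument forms described in the
# docstring (the model instance, view name, and URL are illustrative):
#
#     return redirect(article)                       # model: get_absolute_url()
#     return redirect('article-detail', pk=42)       # view name: reversed
#     return redirect('/articles/', permanent=True)  # URL as-is, permanent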
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
else:
manager = klass._default_manager
return manager.all()
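# Given the normalization above, all three of the following are accepted
# by the *_or_404 helpers below (Article is an illustrative model name):
#
#     _get_queryset(Article)                   # Model -> default manager, .all()
#     _get_queryset(Article.objects)           # Manager -> .all()
#     _get_queryset(Article.objects.filter())  # QuerySet -> returned unchanged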
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises an Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
Note: Like with get(), a MultipleObjectsReturned exception will be raised
if more than one object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
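# Usage sketch (the model and lookup are illustrative):
#
#     article = get_object_or_404(Article, pk=1)
#     # behaves like Article._default_manager.get(pk=1), but raises
#     # Http404 instead of Article.DoesNotExist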
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raises an Http404 exception
if the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
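# Usage sketch (the model and filter are illustrative):
#
#     articles = get_list_or_404(Article, published=True)
#     # behaves like list(Article.objects.filter(published=True)), but
#     # raises Http404 when the resulting list is empty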
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
# Next try a reverse URL resolution.
try:
return urlresolvers.reverse(to, args=args, kwargs=kwargs)
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
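# Usage sketches mirroring the docstring's three cases (names are
# illustrative):
#
#     resolve_url(article)                 # -> article.get_absolute_url()
#     resolve_url('article-detail', pk=1)  # -> urlresolvers.reverse(...)
#     resolve_url('/about/')               # -> '/about/', returned as-is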
|
{
"content_hash": "8a6fe3249820ca35d9309bf9fa98b3e9",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 90,
"avg_line_length": 34.827586206896555,
"alnum_prop": 0.667920792079208,
"repo_name": "chrisfranzen/django",
"id": "0746e843a356bcdde8d3775e36f12caf8e338831",
"size": "5050",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/shortcuts/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42663"
},
{
"name": "HTML",
"bytes": "95024"
},
{
"name": "JavaScript",
"bytes": "94313"
},
{
"name": "Python",
"bytes": "8216479"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|