ngram
listlengths
0
67.8k
[ "in range(1, len(a)): if prev > a[i]: print \"FAIL: Array not sorted properly\"", "range(1, len(a)): if prev > a[i]: print \"FAIL: Array not sorted properly\" print", "\"--SUCCESS: Sorted!--\" if __name__ == \"__main__\": test = [] random.seed() for i in", "random.seed() for i in range(20): test.append(random.randint(0, 100)) print test Heapsort.sort(test) print test Heapsort.verify(test)", "random class Heapsort: \"\"\"Heapsort\"\"\" @staticmethod def bubble_down(a, begin, end): root = begin child", "< a[child + 1])): child += 1 if (a[root] < a[child]): a[root], a[child]", "end): root = begin child = root * 2 + 1 while (child", "sort(a): end = len(a) - 1 Heapsort.heapify_max(a) while (end > 0): a[0], a[end]", "= (end - 1) // 2 while (begin >= 0): Heapsort.bubble_down(a, begin, end)", "+ 1])): child += 1 if (a[root] < a[child]): a[root], a[child] = a[child],", "end) begin -= 1 @staticmethod def sort(a): end = len(a) - 1 Heapsort.heapify_max(a)", "root * 2 + 1 while (child <= end): if ((child + 1", ">= 0): Heapsort.bubble_down(a, begin, end) begin -= 1 @staticmethod def sort(a): end =", "@staticmethod def bubble_down(a, begin, end): root = begin child = root * 2", "heapify_max(a): end = len(a) - 1 begin = (end - 1) // 2", "a[root], a[child] = a[child], a[root] root = child child = root * 2", "if __name__ == \"__main__\": test = [] random.seed() for i in range(20): test.append(random.randint(0,", "1 if (a[root] < a[child]): a[root], a[child] = a[child], a[root] root = child", "end): if ((child + 1 <= end) and (a[child] < a[child + 1])):", "< a[child]): a[root], a[child] = a[child], a[root] root = child child = root", "class Heapsort: \"\"\"Heapsort\"\"\" @staticmethod def bubble_down(a, begin, end): root = begin child =", "1 <= end) and (a[child] < a[child + 1])): child += 1 if", "Array not sorted properly\" print \"--SUCCESS: Sorted!--\" if __name__ == \"__main__\": test =", "begin -= 1 @staticmethod def sort(a): end = len(a) - 1 Heapsort.heapify_max(a) while", 
"a[child]): a[root], a[child] = a[child], a[root] root = child child = root *", "root * 2 + 1 else: return @staticmethod def heapify_max(a): end = len(a)", "== \"__main__\": test = [] random.seed() for i in range(20): test.append(random.randint(0, 100)) print", "<= end): if ((child + 1 <= end) and (a[child] < a[child +", "verify(a): prev = a[0] for item in range(1, len(a)): if prev > a[i]:", "* 2 + 1 else: return @staticmethod def heapify_max(a): end = len(a) -", "2 + 1 while (child <= end): if ((child + 1 <= end)", "+ 1 else: return @staticmethod def heapify_max(a): end = len(a) - 1 begin", "-= 1 @staticmethod def sort(a): end = len(a) - 1 Heapsort.heapify_max(a) while (end", "end = len(a) - 1 Heapsort.heapify_max(a) while (end > 0): a[0], a[end] =", "len(a)): if prev > a[i]: print \"FAIL: Array not sorted properly\" print \"--SUCCESS:", "= [] random.seed() for i in range(20): test.append(random.randint(0, 100)) print test Heapsort.sort(test) print", "\"\"\"Heapsort\"\"\" @staticmethod def bubble_down(a, begin, end): root = begin child = root *", "= len(a) - 1 begin = (end - 1) // 2 while (begin", "end -= 1 Heapsort.bubble_down(a, 0, end) @staticmethod def verify(a): prev = a[0] for", "1 while (child <= end): if ((child + 1 <= end) and (a[child]", "if ((child + 1 <= end) and (a[child] < a[child + 1])): child", "bubble_down(a, begin, end): root = begin child = root * 2 + 1", "1 Heapsort.bubble_down(a, 0, end) @staticmethod def verify(a): prev = a[0] for item in", "while (child <= end): if ((child + 1 <= end) and (a[child] <", "len(a) - 1 begin = (end - 1) // 2 while (begin >=", "\"__main__\": test = [] random.seed() for i in range(20): test.append(random.randint(0, 100)) print test", "-= 1 Heapsort.bubble_down(a, 0, end) @staticmethod def verify(a): prev = a[0] for item", "- 1 begin = (end - 1) // 2 while (begin >= 0):", "and (a[child] < a[child + 1])): child += 1 if (a[root] < a[child]):", "def sort(a): end = len(a) - 1 Heapsort.heapify_max(a) while (end > 0): 
a[0],", "end) and (a[child] < a[child + 1])): child += 1 if (a[root] <", "2 while (begin >= 0): Heapsort.bubble_down(a, begin, end) begin -= 1 @staticmethod def", "__name__ == \"__main__\": test = [] random.seed() for i in range(20): test.append(random.randint(0, 100))", "a[child], a[root] root = child child = root * 2 + 1 else:", "\"FAIL: Array not sorted properly\" print \"--SUCCESS: Sorted!--\" if __name__ == \"__main__\": test", "> a[i]: print \"FAIL: Array not sorted properly\" print \"--SUCCESS: Sorted!--\" if __name__", "@staticmethod def heapify_max(a): end = len(a) - 1 begin = (end - 1)", "+ 1 while (child <= end): if ((child + 1 <= end) and", "[] random.seed() for i in range(20): test.append(random.randint(0, 100)) print test Heapsort.sort(test) print test", "0): a[0], a[end] = a[end], a[0] end -= 1 Heapsort.bubble_down(a, 0, end) @staticmethod", "begin = (end - 1) // 2 while (begin >= 0): Heapsort.bubble_down(a, begin,", "child = root * 2 + 1 else: return @staticmethod def heapify_max(a): end", "= a[0] for item in range(1, len(a)): if prev > a[i]: print \"FAIL:", "<= end) and (a[child] < a[child + 1])): child += 1 if (a[root]", "child = root * 2 + 1 while (child <= end): if ((child", "= a[child], a[root] root = child child = root * 2 + 1", "Heapsort.heapify_max(a) while (end > 0): a[0], a[end] = a[end], a[0] end -= 1", "= a[end], a[0] end -= 1 Heapsort.bubble_down(a, 0, end) @staticmethod def verify(a): prev", "+ 1 <= end) and (a[child] < a[child + 1])): child += 1", "import random class Heapsort: \"\"\"Heapsort\"\"\" @staticmethod def bubble_down(a, begin, end): root = begin", "root = begin child = root * 2 + 1 while (child <=", "print \"--SUCCESS: Sorted!--\" if __name__ == \"__main__\": test = [] random.seed() for i", "= child child = root * 2 + 1 else: return @staticmethod def", "(begin >= 0): Heapsort.bubble_down(a, begin, end) begin -= 1 @staticmethod def sort(a): end", "prev = a[0] for item in range(1, len(a)): if prev > a[i]: print", "- 1 
Heapsort.heapify_max(a) while (end > 0): a[0], a[end] = a[end], a[0] end", "= begin child = root * 2 + 1 while (child <= end):", "end = len(a) - 1 begin = (end - 1) // 2 while", "(end > 0): a[0], a[end] = a[end], a[0] end -= 1 Heapsort.bubble_down(a, 0,", "= len(a) - 1 Heapsort.heapify_max(a) while (end > 0): a[0], a[end] = a[end],", "(a[child] < a[child + 1])): child += 1 if (a[root] < a[child]): a[root],", "def bubble_down(a, begin, end): root = begin child = root * 2 +", "return @staticmethod def heapify_max(a): end = len(a) - 1 begin = (end -", "#!/usr/bin/python import random class Heapsort: \"\"\"Heapsort\"\"\" @staticmethod def bubble_down(a, begin, end): root =", "sorted properly\" print \"--SUCCESS: Sorted!--\" if __name__ == \"__main__\": test = [] random.seed()", "def heapify_max(a): end = len(a) - 1 begin = (end - 1) //", "@staticmethod def verify(a): prev = a[0] for item in range(1, len(a)): if prev", "2 + 1 else: return @staticmethod def heapify_max(a): end = len(a) - 1", "a[0] end -= 1 Heapsort.bubble_down(a, 0, end) @staticmethod def verify(a): prev = a[0]", "+= 1 if (a[root] < a[child]): a[root], a[child] = a[child], a[root] root =", "a[end], a[0] end -= 1 Heapsort.bubble_down(a, 0, end) @staticmethod def verify(a): prev =", "0): Heapsort.bubble_down(a, begin, end) begin -= 1 @staticmethod def sort(a): end = len(a)", "((child + 1 <= end) and (a[child] < a[child + 1])): child +=", "Heapsort.bubble_down(a, begin, end) begin -= 1 @staticmethod def sort(a): end = len(a) -", "Heapsort.bubble_down(a, 0, end) @staticmethod def verify(a): prev = a[0] for item in range(1,", "begin, end) begin -= 1 @staticmethod def sort(a): end = len(a) - 1", "a[end] = a[end], a[0] end -= 1 Heapsort.bubble_down(a, 0, end) @staticmethod def verify(a):", "a[0] for item in range(1, len(a)): if prev > a[i]: print \"FAIL: Array", "item in range(1, len(a)): if prev > a[i]: print \"FAIL: Array not sorted", "(child <= end): if ((child + 1 <= end) and (a[child] < a[child", "if 
(a[root] < a[child]): a[root], a[child] = a[child], a[root] root = child child", "1 else: return @staticmethod def heapify_max(a): end = len(a) - 1 begin =", "a[i]: print \"FAIL: Array not sorted properly\" print \"--SUCCESS: Sorted!--\" if __name__ ==", "def verify(a): prev = a[0] for item in range(1, len(a)): if prev >", "a[child + 1])): child += 1 if (a[root] < a[child]): a[root], a[child] =", "> 0): a[0], a[end] = a[end], a[0] end -= 1 Heapsort.bubble_down(a, 0, end)", "1 @staticmethod def sort(a): end = len(a) - 1 Heapsort.heapify_max(a) while (end >", "prev > a[i]: print \"FAIL: Array not sorted properly\" print \"--SUCCESS: Sorted!--\" if", "1])): child += 1 if (a[root] < a[child]): a[root], a[child] = a[child], a[root]", "test = [] random.seed() for i in range(20): test.append(random.randint(0, 100)) print test Heapsort.sort(test)", "(a[root] < a[child]): a[root], a[child] = a[child], a[root] root = child child =", "a[child] = a[child], a[root] root = child child = root * 2 +", "= root * 2 + 1 else: return @staticmethod def heapify_max(a): end =", "a[0], a[end] = a[end], a[0] end -= 1 Heapsort.bubble_down(a, 0, end) @staticmethod def", "root = child child = root * 2 + 1 else: return @staticmethod", "while (begin >= 0): Heapsort.bubble_down(a, begin, end) begin -= 1 @staticmethod def sort(a):", "- 1) // 2 while (begin >= 0): Heapsort.bubble_down(a, begin, end) begin -=", "= root * 2 + 1 while (child <= end): if ((child +", "end) @staticmethod def verify(a): prev = a[0] for item in range(1, len(a)): if", "child += 1 if (a[root] < a[child]): a[root], a[child] = a[child], a[root] root", "if prev > a[i]: print \"FAIL: Array not sorted properly\" print \"--SUCCESS: Sorted!--\"", "properly\" print \"--SUCCESS: Sorted!--\" if __name__ == \"__main__\": test = [] random.seed() for", "a[root] root = child child = root * 2 + 1 else: return", "begin, end): root = begin child = root * 2 + 1 while", "(end - 1) // 2 while (begin >= 0): Heapsort.bubble_down(a, begin, end) 
begin", "for item in range(1, len(a)): if prev > a[i]: print \"FAIL: Array not", "not sorted properly\" print \"--SUCCESS: Sorted!--\" if __name__ == \"__main__\": test = []", "while (end > 0): a[0], a[end] = a[end], a[0] end -= 1 Heapsort.bubble_down(a,", "print \"FAIL: Array not sorted properly\" print \"--SUCCESS: Sorted!--\" if __name__ == \"__main__\":", "// 2 while (begin >= 0): Heapsort.bubble_down(a, begin, end) begin -= 1 @staticmethod", "Sorted!--\" if __name__ == \"__main__\": test = [] random.seed() for i in range(20):", "* 2 + 1 while (child <= end): if ((child + 1 <=", "1) // 2 while (begin >= 0): Heapsort.bubble_down(a, begin, end) begin -= 1", "@staticmethod def sort(a): end = len(a) - 1 Heapsort.heapify_max(a) while (end > 0):", "1 Heapsort.heapify_max(a) while (end > 0): a[0], a[end] = a[end], a[0] end -=", "begin child = root * 2 + 1 while (child <= end): if", "1 begin = (end - 1) // 2 while (begin >= 0): Heapsort.bubble_down(a,", "else: return @staticmethod def heapify_max(a): end = len(a) - 1 begin = (end", "Heapsort: \"\"\"Heapsort\"\"\" @staticmethod def bubble_down(a, begin, end): root = begin child = root", "0, end) @staticmethod def verify(a): prev = a[0] for item in range(1, len(a)):", "len(a) - 1 Heapsort.heapify_max(a) while (end > 0): a[0], a[end] = a[end], a[0]", "child child = root * 2 + 1 else: return @staticmethod def heapify_max(a):" ]
[ "is None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short,", "assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_long(pipeline_type): \"\"\"Make a long", "make_pipeline_nested(pipeline_type): \"\"\"Make a nested pipeline.\"\"\" pipeline = pipeline_type( make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(), EventLink()),", "assert not pipeline.top.has_receive() assert pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is", "pipelines module.\"\"\" # Builtins # Packages from phylline.links.clocked import DelayedEventLink from phylline.links.events import", "\"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_loopback:') automatic_pipeline = make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline)", "for pipeline bottom coupling on streams.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one", "def test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's clock functionality.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_delayed:') pipeline", "write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result = automatic_pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert", "from phylline.links.streams import StreamLink from phylline.pipelines import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler from phylline.pipes import", "None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory): \"\"\"Make a long", "the stack has correct 
below-loopback behavior.\"\"\" stack.send(payload) assert stack.has_receive() result = stack.receive() print('Loopback", "wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's clock", "a loopback at the top.\"\"\" manual_pipeline = pipeline_factory( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(),", "= pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM #", "TopLoopbackLink() ) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one,", "pipeline with a loopback at the top.\"\"\" manual_pipeline = pipeline_factory( StreamLink(), StreamLink(), StreamLink(),", "ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory {}:'.format(pipeline_factory.__name__)) pipeline = pipeline_factory(ManualPipeline) print(pipeline) #", "manual_pipeline = pipeline_factory( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink(), TopLoopbackLink() ) assert", "from phylline.links.links import ChunkedStreamLink from phylline.links.loopback import TopLoopbackLink from phylline.links.streams import StreamLink from", "print('Testing Automatic Pipeline with factory {}:'.format(pipeline_factory.__name__)) automatic_pipeline = pipeline_factory(AutomaticPipeline) print(automatic_pipeline) # Read/write on", "AutomaticPipeline's clock functionality.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(AutomaticPipeline) print(pipeline)", "== 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.99) == 1.0 assert not pipeline.top.has_receive() assert", "make_pipeline_nested ]) def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise 
AutomaticPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory {}:'.format(pipeline_factory.__name__))", "with factory make_pipeline_loopback:') manual_pipeline = make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync() is None result", "is None assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.update_clock(0) ==", "write_bottom_chunked_buffers(pipeline) assert pipeline.sync() is None assert_bottom_events(pipeline) write_top_events(pipeline) assert pipeline.sync() is None result =", "pipeline = pipeline_type( ChunkedStreamLink(), EventLink(), ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, EventLink) return", "automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def assert_loopback_below(stack,", "StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_events(pipeline_type): \"\"\"Make a events-only pipeline.\"\"\" pipeline", "assert not pipeline.top.has_receive() assert pipeline.update_clock(0.99) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(1.0) is", "\"\"\"Make a long pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(),", "assert_loopback_below(pipeline_one, payload) assert_loopback_below(coupler.pipeline_one, payload) def test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for pipeline bottom coupling on events.\"\"\"", "make_pipeline_events(AutomaticPipeline), TopLoopbackLink() ) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload)", "test_automatic_pipeline(pipeline_factory): \"\"\"Exercise 
AutomaticPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory {}:'.format(pipeline_factory.__name__)) automatic_pipeline = pipeline_factory(AutomaticPipeline)", "make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(), EventLink()), make_pipeline_events(pipeline_type) ) return pipeline @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short,", "assert result == LOWER_CHUNKED_STREAM def assert_loopback_below(stack, payload): \"\"\"Asset that the stack has correct", ") assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, ChunkedStreamLink) return pipeline def make_pipeline_short(pipeline_type): \"\"\"Make a", "{}:'.format(pipeline_factory.__name__)) pipeline = pipeline_factory(ManualPipeline) print(pipeline) # Read/write on links with directional sync write_bottom_chunked_buffers(pipeline.bottom)", "make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise AutomaticPipeline's interface.\"\"\" print('Testing Automatic Pipeline", "result == LOWER_CHUNKED_STREAM def assert_loopback_below(stack, payload): \"\"\"Asset that the stack has correct below-loopback", "pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.sync()", "assert stack.has_receive() result = stack.receive() print('Loopback received: {}'.format(result)) assert result.data == payload def", "clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.update_clock(0) == 1.0 assert pipeline.update_clock(0.5) ==", ") assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_long(pipeline_type): \"\"\"Make a", "factory 
make_pipeline_delayed:') pipeline = make_pipeline_delayed(ManualPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync()", "StreamLink from phylline.pipelines import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler from phylline.pipes import AutomaticPipe import pytest", "EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def", "== 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write() assert", "def make_pipeline_events(pipeline_type): \"\"\"Make a events-only pipeline.\"\"\" pipeline = pipeline_type( EventLink(), EventLink(), EventLink(), EventLink()", "functionality.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert pipeline.update_clock(0)", "print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked(): \"\"\"Exercise", "HIGHER_CHUNKED_STREAM # Read/write on links with bidirectional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() is None", "pipeline @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's interface.\"\"\"", "assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.sync() == 1.0 assert", "pipeline.sync() is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert", "links write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) 
result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result))", "ChunkedStreamLink from phylline.links.loopback import TopLoopbackLink from phylline.links.streams import StreamLink from phylline.pipelines import AutomaticPipeline,", "= make_pipeline_delayed(ManualPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() == 1.0 assert", "return manual_pipeline def test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_loopback:')", "result == HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's clock functionality.\"\"\" print('Testing Automatic Pipeline with", "HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from tests.unit.pipes import ( assert_bottom_events, write_bottom_chunked_buffers, write_top_events ) def make_pipeline_singular(pipeline_type): \"\"\"Make", "make_pipeline_long, make_pipeline_nested ]) def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise AutomaticPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory", "a short pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), ChunkedStreamLink(), DelayedEventLink(), EventLink(), EventLink() ) assert", "from phylline.pipelines import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler from phylline.pipes import AutomaticPipe import pytest from", "HIGHER_CHUNKED_STREAM # Read/write on pipeline write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result = automatic_pipeline.to_write() print('Pipeline bottom", "the top.\"\"\" manual_pipeline = pipeline_factory( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink(), TopLoopbackLink()", "stack has correct below-loopback behavior.\"\"\" stack.send(payload) assert 
stack.has_receive() result = stack.receive() print('Loopback received:", "make_pipeline_singular(pipeline_type): \"\"\"Make a singular pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink() ) assert isinstance(pipeline.bottom, ChunkedStreamLink)", "a singular pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink() ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top,", "make_pipeline_nested(AutomaticPipeline) pipeline_two = make_pipeline_loopback(AutomaticPipeline) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top,", "stack.has_receive() result = stack.receive() print('Loopback received: {}'.format(result)) assert result.data == payload def test_loopback_pipeline_bottom_coupler_stream():", "stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Automatic", "on pipe with bidirectionl sync write_bottom_chunked_buffers(pipeline) assert pipeline.sync() is None assert_bottom_events(pipeline) write_top_events(pipeline) assert", "Automatic Pipeline with factory {}:'.format(pipeline_factory.__name__)) automatic_pipeline = pipeline_factory(AutomaticPipeline) print(automatic_pipeline) # Read/write on links", "ChunkedStreamLink(), DelayedEventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline", "make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with", "payload def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for pipeline bottom coupling on streams.\"\"\" print('Testing byte buffer", "pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top) print('Resetting clock...') assert 
pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.update_clock(0)", "1.0 assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75) == 1.0 assert", "result = stack.receive() print('Loopback received: {}'.format(result)) assert result.data == payload def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test", "pipeline.update_clock(0.5) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.99) == 1.0 assert not pipeline.top.has_receive()", "= automatic_pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def", "wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on pipe with", "\"\"\"Make a long pipeline with a loopback at the top.\"\"\" manual_pipeline = pipeline_factory(", "is None assert_bottom_events(pipeline) write_top_events(pipeline) assert pipeline.sync() is None result = pipeline.to_write() print('Pipeline bottom", "== LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_loopback:')", "links with directional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync_down()", "print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0) == 1.0 assert not pipeline.top.has_receive()", "pipeline def make_pipeline_long(pipeline_type): \"\"\"Make a long pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), StreamLink(), StreamLink(),", "<reponame>ethanjli/phylline \"\"\"Test the pipelines module.\"\"\" # Builtins # Packages from phylline.links.clocked import DelayedEventLink", "assert pipeline.update_clock(0.99) == 1.0 assert not 
pipeline.top.has_receive() assert pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top) print('Resetting", "make_pipeline_loopback(AutomaticPipeline) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload)", "assert result == LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Automatic Pipeline with", "phylline.links.events import EventLink from phylline.links.links import ChunkedStreamLink from phylline.links.loopback import TopLoopbackLink from phylline.links.streams", "def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise AutomaticPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory {}:'.format(pipeline_factory.__name__)) automatic_pipeline =", "Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom)", "result = automatic_pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM", "assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result", "StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top,", "assert result == HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's clock functionality.\"\"\" print('Testing Automatic Pipeline", "pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory): \"\"\"Make a long pipeline with a", "= 
pipeline_factory( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink(), TopLoopbackLink() ) assert isinstance(manual_pipeline.bottom,", "AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink() ) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top,", "\"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink(), EventLink(), ) assert isinstance(pipeline.bottom, ChunkedStreamLink)", "{}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's clock functionality.\"\"\" print('Testing Automatic", "factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0)", "from phylline.links.events import EventLink from phylline.links.links import ChunkedStreamLink from phylline.links.loopback import TopLoopbackLink from", "assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result = automatic_pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result", "Read/write on pipe with bidirectionl sync write_bottom_chunked_buffers(pipeline) assert pipeline.sync() is None assert_bottom_events(pipeline) write_top_events(pipeline)", "pipeline.update_clock(0) == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75) ==", "EventLink) return pipeline def make_pipeline_events(pipeline_type): \"\"\"Make a events-only pipeline.\"\"\" pipeline = pipeline_type( EventLink(),", "payload) def test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for pipeline bottom coupling on events.\"\"\" print('Testing byte buffer", "assert pipeline.update_clock(0.5) == 1.0 assert not 
pipeline.top.has_receive() assert pipeline.update_clock(0.99) == 1.0 assert not", "test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_loopback:') manual_pipeline = make_pipeline_loopback(ManualPipeline)", "make_pipeline_loopback:') manual_pipeline = make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync() is None result = manual_pipeline.bottom.to_write()", "on events.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_events(AutomaticPipeline) pipeline_two =", ") assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_events(pipeline_type): \"\"\"Make a", "test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory {}:'.format(pipeline_factory.__name__)) pipeline = pipeline_factory(ManualPipeline)", "make_pipeline_long(pipeline_type): \"\"\"Make a long pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(),", "result = pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM", "pipeline_type( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert", "behavior.\"\"\" stack.send(payload) assert stack.has_receive() result = stack.receive() print('Loopback received: {}'.format(result)) assert result.data ==", "bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on pipeline", "= pipeline_type( make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(), EventLink()), 
make_pipeline_events(pipeline_type) ) return pipeline @pytest.mark.parametrize('pipeline_factory', [", "def test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's clock functionality.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_delayed:') pipeline", "def make_pipeline_nested(pipeline_type): \"\"\"Make a nested pipeline.\"\"\" pipeline = pipeline_type( make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(),", "[ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual", "payload) assert_loopback_below(coupler.pipeline_one, payload) def test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for pipeline bottom coupling on events.\"\"\" print('Testing", "pipeline bottom coupling on events.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one =", "pipeline_factory( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink(), TopLoopbackLink() ) assert isinstance(manual_pipeline.bottom, StreamLink)", "EventLink(), TopLoopbackLink() ) assert isinstance(manual_pipeline.bottom, StreamLink) assert isinstance(manual_pipeline.top, TopLoopbackLink) return manual_pipeline def test_manual_pipeline_loopback():", "bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on links", "EventLink(), AutomaticPipe(EventLink(), EventLink()), make_pipeline_events(pipeline_type) ) return pipeline @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested", "not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.99) == 1.0", "has correct below-loopback behavior.\"\"\" 
stack.send(payload) assert stack.has_receive() result = stack.receive() print('Loopback received: {}'.format(result))", "manual_pipeline def test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_loopback:') manual_pipeline", "pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write", "write_top_events(pipeline.top) assert pipeline.sync() == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write() assert", "top.\"\"\" manual_pipeline = pipeline_factory( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink(), TopLoopbackLink() )", "EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_delayed(pipeline_type): \"\"\"Make", "HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory): \"\"\"Make a long pipeline with a loopback at the top.\"\"\"", "is None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory): \"\"\"Make a", "print(automatic_pipeline) # Read/write on links write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom", "EventLink(), EventLink() ) assert isinstance(pipeline.bottom, EventLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_nested(pipeline_type):", "bottom coupling on events.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_events(AutomaticPipeline)", "to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's clock functionality.\"\"\"", 
"is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync() is None result = pipeline.bottom.to_write() print('Pipeline bottom", "automatic_pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked():", "= pipeline_type( ChunkedStreamLink() ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, ChunkedStreamLink) return pipeline def", "HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise AutomaticPipeline's interface.\"\"\"", "assert_loopback_below(coupler.pipeline_one, payload) def test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for pipeline bottom coupling on events.\"\"\" print('Testing byte", "= b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload) assert_loopback_below(coupler.pipeline_one, payload) def test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for pipeline", "Packages from phylline.links.clocked import DelayedEventLink from phylline.links.events import EventLink from phylline.links.links import ChunkedStreamLink", "events-only pipeline.\"\"\" pipeline = pipeline_type( EventLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, EventLink)", "pipeline = make_pipeline_delayed(ManualPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() == 1.0", "not pipeline.bottom.to_write() assert pipeline.update_clock(0.75) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(1.5) is None", "factory {}:'.format(pipeline_factory.__name__)) automatic_pipeline = pipeline_factory(AutomaticPipeline) print(automatic_pipeline) # Read/write on 
links write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top)", "]) def test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory {}:'.format(pipeline_factory.__name__)) pipeline", "print('Testing Automatic Pipeline with factory make_pipeline_loopback:') automatic_pipeline = make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom) result =", "is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result", "assert isinstance(manual_pipeline.top, TopLoopbackLink) return manual_pipeline def test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline", "pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink(), EventLink(), ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, EventLink)", "phylline.links.loopback import TopLoopbackLink from phylline.links.streams import StreamLink from phylline.pipelines import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler", "None result = pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result ==", "AutomaticPipe(EventLink(), EventLink()), make_pipeline_events(pipeline_type) ) return pipeline @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ])", "pipeline.top.has_receive() assert pipeline.update_clock(0.99) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top)", "pipeline = pipeline_type( ChunkedStreamLink() ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, ChunkedStreamLink) return pipeline", "the pipelines module.\"\"\" # 
Builtins # Packages from phylline.links.clocked import DelayedEventLink from phylline.links.events", "None write_top_events(pipeline.top) assert pipeline.update_clock(0) == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write()", "on links with directional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert", "import TopLoopbackLink from phylline.links.streams import StreamLink from phylline.pipelines import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler from", "with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(ManualPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert", "1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.99) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(1.0)", "def test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_loopback:') automatic_pipeline =", "write_top_events(pipeline.top) assert pipeline.update_clock(0) == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write() assert", "result = manual_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM", "with factory {}:'.format(pipeline_factory.__name__)) automatic_pipeline = pipeline_factory(AutomaticPipeline) print(automatic_pipeline) # Read/write on links write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top)", "pipeline_one = make_pipeline_nested(AutomaticPipeline) pipeline_two = make_pipeline_loopback(AutomaticPipeline) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload =", "assert pipeline.sync() is None result = pipeline.to_write() print('Pipeline 
bottom wrote to stream: {}'.format(result))", "assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_events(pipeline_type): \"\"\"Make a events-only pipeline.\"\"\" pipeline =", "def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for pipeline bottom coupling on streams.\"\"\" print('Testing byte buffer loopback", "pipeline def make_pipeline_nested(pipeline_type): \"\"\"Make a nested pipeline.\"\"\" pipeline = pipeline_type( make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(),", "isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_delayed(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type(", "Manual Pipeline with factory make_pipeline_loopback:') manual_pipeline = make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync() is", "from phylline.links.loopback import TopLoopbackLink from phylline.links.streams import StreamLink from phylline.pipelines import AutomaticPipeline, ManualPipeline,", "{}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on links with bidirectional sync write_bottom_chunked_buffers(pipeline.bottom)", "bottom coupling on streams.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_nested(AutomaticPipeline)", "short pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink(), EventLink(), ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top,", "TopLoopbackLink) return manual_pipeline def test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory", "1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write() assert result", "write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() == 1.0 assert not 
pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert", "to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def assert_loopback_below(stack, payload): \"\"\"Asset that the", "HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's clock functionality.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_delayed:')", "on links write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream:", "EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_events(pipeline_type): \"\"\"Make", "# Packages from phylline.links.clocked import DelayedEventLink from phylline.links.events import EventLink from phylline.links.links import", "pipeline def make_pipeline_delayed(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), ChunkedStreamLink(), DelayedEventLink(),", "payload): \"\"\"Asset that the stack has correct below-loopback behavior.\"\"\" stack.send(payload) assert stack.has_receive() result", "print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() == 1.0 assert not pipeline.top.has_receive()", "assert manual_pipeline.sync() is None result = manual_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result))", "= automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def", "pipeline.sync() == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75) ==", "@pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, 
make_pipeline_nested ]) def test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing", "\"\"\"Test the pipelines module.\"\"\" # Builtins # Packages from phylline.links.clocked import DelayedEventLink from", ") assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_delayed(pipeline_type): \"\"\"Make a", "( assert_bottom_events, write_bottom_chunked_buffers, write_top_events ) def make_pipeline_singular(pipeline_type): \"\"\"Make a singular pipeline.\"\"\" pipeline =", "ChunkedStreamLink() ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, ChunkedStreamLink) return pipeline def make_pipeline_short(pipeline_type): \"\"\"Make", "make_pipeline_short(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink(), EventLink(), ) assert isinstance(pipeline.bottom,", "assert result == HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory): \"\"\"Make a long pipeline with a loopback", "assert not pipeline.bottom.to_write() assert pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write() assert result ==", "automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write", "with bidirectional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync() is", "None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long,", "buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_events(AutomaticPipeline) pipeline_two = AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink() )", "interface.\"\"\" 
print('Testing Manual Pipeline with factory make_pipeline_loopback:') manual_pipeline = make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert", "pipeline_type( EventLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, EventLink) assert isinstance(pipeline.top, EventLink) return", "isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_long(pipeline_type): \"\"\"Make a long pipeline.\"\"\" pipeline = pipeline_type(", "result == LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory", "assert pipeline.update_clock(0.75) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(1.5) is None result =", "long pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink() )", "write_top_events(pipeline) assert pipeline.sync() is None result = pipeline.to_write() print('Pipeline bottom wrote to stream:", "== payload def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for pipeline bottom coupling on streams.\"\"\" print('Testing byte", "HIGHER_CHUNKED_STREAM # Read/write on pipe with bidirectionl sync write_bottom_chunked_buffers(pipeline) assert pipeline.sync() is None", "write_top_events(pipeline.top) assert pipeline.sync() is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote to stream:", "to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's clock functionality.\"\"\"", "assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.99) ==", "\"\"\"Exercise AutomaticPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory 
{}:'.format(pipeline_factory.__name__)) automatic_pipeline = pipeline_factory(AutomaticPipeline) print(automatic_pipeline)", "test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for pipeline bottom coupling on events.\"\"\" print('Testing byte buffer loopback with", "pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.99) == 1.0 assert", "Pipeline with factory {}:'.format(pipeline_factory.__name__)) automatic_pipeline = pipeline_factory(AutomaticPipeline) print(automatic_pipeline) # Read/write on links write_bottom_chunked_buffers(automatic_pipeline.bottom)", "EventLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, EventLink) assert isinstance(pipeline.top, EventLink) return pipeline", "EventLink) return pipeline def make_pipeline_delayed(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( StreamLink(),", "AutomaticPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory {}:'.format(pipeline_factory.__name__)) automatic_pipeline = pipeline_factory(AutomaticPipeline) print(automatic_pipeline) #", "loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_nested(AutomaticPipeline) pipeline_two = make_pipeline_loopback(AutomaticPipeline) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two)", "pipeline_factory(ManualPipeline) print(pipeline) # Read/write on links with directional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up() is", "byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_events(AutomaticPipeline) pipeline_two = AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink()", "bidirectional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync() is None", "not pipeline.bottom.to_write() 
assert pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM", "EventLink() ) assert isinstance(pipeline.bottom, EventLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_nested(pipeline_type): \"\"\"Make", "StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink(), TopLoopbackLink() ) assert isinstance(manual_pipeline.bottom, StreamLink) assert isinstance(manual_pipeline.top, TopLoopbackLink)", "EventLink(), ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_long(pipeline_type): \"\"\"Make", "wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's clock", "print('Testing Automatic Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert pipeline.update_clock(0) is", "bidirectionl sync write_bottom_chunked_buffers(pipeline) assert pipeline.sync() is None assert_bottom_events(pipeline) write_top_events(pipeline) assert pipeline.sync() is None", "Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(ManualPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom)", "result.data == payload def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for pipeline bottom coupling on streams.\"\"\" print('Testing", "assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(1.5) is", "Read/write on pipeline write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result = automatic_pipeline.to_write() print('Pipeline bottom wrote to", "pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM 
@pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def", "DelayedEventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def", "assert isinstance(manual_pipeline.bottom, StreamLink) assert isinstance(manual_pipeline.top, TopLoopbackLink) return manual_pipeline def test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\"", "pytest from tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from tests.unit.pipes import ( assert_bottom_events, write_bottom_chunked_buffers, write_top_events", "stack.receive() print('Loopback received: {}'.format(result)) assert result.data == payload def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for pipeline", "phylline.links.streams import StreamLink from phylline.pipelines import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler from phylline.pipes import AutomaticPipe", "assert_bottom_events, write_bottom_chunked_buffers, write_top_events ) def make_pipeline_singular(pipeline_type): \"\"\"Make a singular pipeline.\"\"\" pipeline = pipeline_type(", "import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler from phylline.pipes import AutomaticPipe import pytest from tests.unit.links.links import", "pipeline.bottom.to_write() assert pipeline.update_clock(0.75) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(1.5) is None result", "assert result == HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_automatic_pipeline(pipeline_factory):", "result == HIGHER_CHUNKED_STREAM # Read/write on pipe with bidirectionl sync write_bottom_chunked_buffers(pipeline) assert pipeline.sync()", "events.\"\"\" print('Testing byte buffer loopback with 
PipelineBottomCoupler...') pipeline_one = make_pipeline_events(AutomaticPipeline) pipeline_two = AutomaticPipeline(", "bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on pipe", "pipeline write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result = automatic_pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result))", "ChunkedStreamLink(), EventLink(), ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_long(pipeline_type):", "ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_loopback:') manual_pipeline = make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom)", "print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result", "received: {}'.format(result)) assert result.data == payload def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for pipeline bottom coupling", "TopLoopbackLink from phylline.links.streams import StreamLink from phylline.pipelines import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler from phylline.pipes", "= manual_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def", "from tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from tests.unit.pipes import ( assert_bottom_events, write_bottom_chunked_buffers, write_top_events )", "= PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload) assert_loopback_below(coupler.pipeline_one, payload)", 
"ChunkedStreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_long(pipeline_type): \"\"\"Make a long pipeline.\"\"\" pipeline", "HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's clock functionality.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_delayed:')", "pipeline def make_pipeline_events(pipeline_type): \"\"\"Make a events-only pipeline.\"\"\" pipeline = pipeline_type( EventLink(), EventLink(), EventLink(),", "assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0) == 1.0 assert not pipeline.top.has_receive() assert", "PipelineBottomCoupler...') pipeline_one = make_pipeline_events(AutomaticPipeline) pipeline_two = AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink() ) coupler = PipelineBottomCoupler(pipeline_one,", "import StreamLink from phylline.pipelines import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler from phylline.pipes import AutomaticPipe import", "def test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for pipeline bottom coupling on events.\"\"\" print('Testing byte buffer loopback", "# Builtins # Packages from phylline.links.clocked import DelayedEventLink from phylline.links.events import EventLink from", "streams.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_nested(AutomaticPipeline) pipeline_two = make_pipeline_loopback(AutomaticPipeline)", "pipeline.\"\"\" pipeline = pipeline_type( EventLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, EventLink) assert", "result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested", "isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_nested(pipeline_type): 
\"\"\"Make a nested pipeline.\"\"\" pipeline = pipeline_type(", "Read/write on links write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to", "Pipeline with factory make_pipeline_loopback:') automatic_pipeline = make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom) result = automatic_pipeline.bottom.to_write() print('Pipeline", "that the stack has correct below-loopback behavior.\"\"\" stack.send(payload) assert stack.has_receive() result = stack.receive()", "pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.sync() == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert", "\"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_loopback:') manual_pipeline = make_pipeline_loopback(ManualPipeline) print(manual_pipeline)", "pipeline = pipeline_type( StreamLink(), ChunkedStreamLink(), DelayedEventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert", "assert pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory):", "loopback at the top.\"\"\" manual_pipeline = pipeline_factory( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(),", "assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.sync() == 1.0 assert pipeline.update_clock(0.5) == 1.0", "import AutomaticPipe import pytest from tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from tests.unit.pipes import (", "assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync() is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote to", 
"make_pipeline_delayed:') pipeline = make_pipeline_delayed(ManualPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() ==", "= pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def", "pipeline.update_clock(0.99) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top) print('Resetting clock...')", "Automatic Pipeline with factory make_pipeline_loopback:') automatic_pipeline = make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom) result = automatic_pipeline.bottom.to_write()", "nested pipeline.\"\"\" pipeline = pipeline_type( make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(), EventLink()), make_pipeline_events(pipeline_type) ) return", "return pipeline @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's", "assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() == 1.0 assert not pipeline.top.has_receive() assert", "make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync() is None result = manual_pipeline.bottom.to_write() print('Pipeline bottom wrote", "stack.send(payload) assert stack.has_receive() result = stack.receive() print('Loopback received: {}'.format(result)) assert result.data == payload", "payload) assert_loopback_below(pipeline_one, payload) assert_loopback_below(coupler.pipeline_one, payload) def test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for pipeline bottom coupling on", "import DelayedEventLink 
from phylline.links.events import EventLink from phylline.links.links import ChunkedStreamLink from phylline.links.loopback import", "clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.sync() == 1.0 assert pipeline.update_clock(0.5) ==", "1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.99)", "isinstance(manual_pipeline.top, TopLoopbackLink) return manual_pipeline def test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with", "== 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.top.has_receive() assert", "PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload) assert_loopback_below(coupler.pipeline_one, payload) def", "def assert_loopback_below(stack, payload): \"\"\"Asset that the stack has correct below-loopback behavior.\"\"\" stack.send(payload) assert", "on links with bidirectional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert", "byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_nested(AutomaticPipeline) pipeline_two = make_pipeline_loopback(AutomaticPipeline) coupler =", "Pipeline with factory make_pipeline_loopback:') manual_pipeline = make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync() is None", "clock functionality.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(ManualPipeline) print(pipeline) assert", "singular pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink() ) 
assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, ChunkedStreamLink)", "pipeline = pipeline_type( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom,", "assert pipeline.sync() == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75)", "phylline.links.clocked import DelayedEventLink from phylline.links.events import EventLink from phylline.links.links import ChunkedStreamLink from phylline.links.loopback", "correct below-loopback behavior.\"\"\" stack.send(payload) assert stack.has_receive() result = stack.receive() print('Loopback received: {}'.format(result)) assert", "= pipeline_type( ChunkedStreamLink(), EventLink(), ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, EventLink) return pipeline", ") assert isinstance(manual_pipeline.bottom, StreamLink) assert isinstance(manual_pipeline.top, TopLoopbackLink) return manual_pipeline def test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's", "write_top_events(automatic_pipeline.top) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result ==", "def make_pipeline_loopback(pipeline_factory): \"\"\"Make a long pipeline with a loopback at the top.\"\"\" manual_pipeline", "with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert", "sync write_bottom_chunked_buffers(pipeline) assert pipeline.sync() is None assert_bottom_events(pipeline) write_top_events(pipeline) assert pipeline.sync() is None result", "= make_pipeline_events(AutomaticPipeline) pipeline_two = AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink() ) coupler = PipelineBottomCoupler(pipeline_one, 
pipeline_two) print(coupler)", "automatic_pipeline = pipeline_factory(AutomaticPipeline) print(automatic_pipeline) # Read/write on links write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) result =", "{}'.format(result)) assert result.data == payload def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for pipeline bottom coupling on", "is None result = pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result", "EventLink(), EventLink(), TopLoopbackLink() ) assert isinstance(manual_pipeline.bottom, StreamLink) assert isinstance(manual_pipeline.top, TopLoopbackLink) return manual_pipeline def", "make_pipeline_delayed:') pipeline = make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0) ==", "from phylline.pipes import AutomaticPipe import pytest from tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from tests.unit.pipes", "assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, ChunkedStreamLink) return pipeline def make_pipeline_short(pipeline_type): \"\"\"Make a short", "return pipeline def make_pipeline_nested(pipeline_type): \"\"\"Make a nested pipeline.\"\"\" pipeline = pipeline_type( make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type),", "assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync_down() is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote to", "isinstance(pipeline.bottom, EventLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_nested(pipeline_type): \"\"\"Make a nested pipeline.\"\"\"", "pipeline bottom coupling on streams.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one =", "= 
make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync() is None result = manual_pipeline.bottom.to_write() print('Pipeline bottom", "\"\"\"Test for pipeline bottom coupling on streams.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...')", "assert pipeline.update_clock(0) == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75)", "test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's clock functionality.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_delayed:') pipeline =", "links with bidirectional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync()", ") def make_pipeline_singular(pipeline_type): \"\"\"Make a singular pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink() ) assert", "with bidirectionl sync write_bottom_chunked_buffers(pipeline) assert pipeline.sync() is None assert_bottom_events(pipeline) write_top_events(pipeline) assert pipeline.sync() is", "= pipeline_type( StreamLink(), ChunkedStreamLink(), DelayedEventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top,", "pipeline = pipeline_type( make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(), EventLink()), make_pipeline_events(pipeline_type) ) return pipeline @pytest.mark.parametrize('pipeline_factory',", "EventLink) return pipeline def make_pipeline_long(pipeline_type): \"\"\"Make a long pipeline.\"\"\" pipeline = pipeline_type( StreamLink(),", "test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's clock functionality.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_delayed:') pipeline =", "None 
assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync_down() is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote", "assert isinstance(pipeline.bottom, EventLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_nested(pipeline_type): \"\"\"Make a nested", "# Read/write on pipe with bidirectionl sync write_bottom_chunked_buffers(pipeline) assert pipeline.sync() is None assert_bottom_events(pipeline)", "manual_pipeline.sync() is None result = manual_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert", "Manual Pipeline with factory {}:'.format(pipeline_factory.__name__)) pipeline = pipeline_factory(ManualPipeline) print(pipeline) # Read/write on links", "assert result == HIGHER_CHUNKED_STREAM # Read/write on pipe with bidirectionl sync write_bottom_chunked_buffers(pipeline) assert", "import EventLink from phylline.links.links import ChunkedStreamLink from phylline.links.loopback import TopLoopbackLink from phylline.links.streams import", "None assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.update_clock(0) == 1.0", "result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory): \"\"\"Make a long pipeline", "is None result = manual_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result", "assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.update_clock(0) == 1.0 assert pipeline.update_clock(0.5) == 1.0", "AutomaticPipeline, ManualPipeline, PipelineBottomCoupler from phylline.pipes import AutomaticPipe import pytest from tests.unit.links.links import HIGHER_CHUNKED_STREAM,", "print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.update_clock(0) == 1.0 
assert pipeline.update_clock(0.5)", "a long pipeline with a loopback at the top.\"\"\" manual_pipeline = pipeline_factory( StreamLink(),", "pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory): \"\"\"Make", "return pipeline def make_pipeline_short(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink(), EventLink(),", "pipeline.sync() is None assert_bottom_events(pipeline) write_top_events(pipeline) assert pipeline.sync() is None result = pipeline.to_write() print('Pipeline", "assert result == HIGHER_CHUNKED_STREAM # Read/write on pipeline write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result =", "pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink() ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, ChunkedStreamLink) return", "buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_nested(AutomaticPipeline) pipeline_two = make_pipeline_loopback(AutomaticPipeline) coupler = PipelineBottomCoupler(pipeline_one,", "None assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.sync() == 1.0", "manual_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback():", "on pipeline write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result = automatic_pipeline.to_write() print('Pipeline bottom wrote to stream:", "StreamLink(), ChunkedStreamLink(), DelayedEventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return", "is None 
write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) ==", "bottom wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def assert_loopback_below(stack, payload): \"\"\"Asset", "on streams.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_nested(AutomaticPipeline) pipeline_two =", "Pipeline with factory {}:'.format(pipeline_factory.__name__)) pipeline = pipeline_factory(ManualPipeline) print(pipeline) # Read/write on links with", "at the top.\"\"\" manual_pipeline = pipeline_factory( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink(),", "assert isinstance(pipeline.top, ChunkedStreamLink) return pipeline def make_pipeline_short(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline =", "result == HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory): \"\"\"Make a long pipeline with a loopback at", "make_pipeline_events(pipeline_type): \"\"\"Make a events-only pipeline.\"\"\" pipeline = pipeline_type( EventLink(), EventLink(), EventLink(), EventLink() )", "pipeline.bottom.to_write() assert pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory',", "ManualPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_loopback:') automatic_pipeline = make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom)", "print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_nested(AutomaticPipeline) pipeline_two = make_pipeline_loopback(AutomaticPipeline) coupler", "Read/write on links with directional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up() is None 
assert_bottom_events(pipeline.top) write_top_events(pipeline.top)", "sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync_down() is None result", "ManualPipeline, PipelineBottomCoupler from phylline.pipes import AutomaticPipe import pytest from tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM", "result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM", "assert pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert", "isinstance(manual_pipeline.bottom, StreamLink) assert isinstance(manual_pipeline.top, TopLoopbackLink) return manual_pipeline def test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing", "Builtins # Packages from phylline.links.clocked import DelayedEventLink from phylline.links.events import EventLink from phylline.links.links", "stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's clock functionality.\"\"\" print('Testing", "test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_loopback:') automatic_pipeline = make_pipeline_loopback(AutomaticPipeline)", "test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for pipeline bottom coupling on streams.\"\"\" print('Testing byte buffer loopback with", "[ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise AutomaticPipeline's interface.\"\"\" print('Testing Automatic", "= pipeline_factory(AutomaticPipeline) print(automatic_pipeline) # 
Read/write on links write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) result = automatic_pipeline.bottom.to_write()", "isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_events(pipeline_type): \"\"\"Make a events-only pipeline.\"\"\"", "wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def assert_loopback_below(stack, payload): \"\"\"Asset that", "assert pipeline.sync_up() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync_down() is None result = pipeline.bottom.to_write()", "pipeline.sync() == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.top.has_receive()", "assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_delayed(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline =", "= pipeline_type( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink)", "assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75) == 1.0 assert not", "Manual Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(ManualPipeline) print(pipeline) assert pipeline.update_clock(0) is None", "make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise AutomaticPipeline's interface.\"\"\" print('Testing Automatic Pipeline with", "AutomaticPipe import pytest from tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from tests.unit.pipes import ( assert_bottom_events,", "with factory make_pipeline_loopback:') automatic_pipeline = make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) 
write_bottom_chunked_buffers(automatic_pipeline.bottom) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom", "def make_pipeline_long(pipeline_type): \"\"\"Make a long pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(),", "pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink() ) assert", "make_pipeline_long, make_pipeline_nested ]) def test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory", "write_top_events ) def make_pipeline_singular(pipeline_type): \"\"\"Make a singular pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink() )", "tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from tests.unit.pipes import ( assert_bottom_events, write_bottom_chunked_buffers, write_top_events ) def", "return pipeline def make_pipeline_long(pipeline_type): \"\"\"Make a long pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), StreamLink(),", "pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75) == 1.0 assert not pipeline.bottom.to_write()", "pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), ChunkedStreamLink(), DelayedEventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink)", "import pytest from tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from tests.unit.pipes import ( assert_bottom_events, write_bottom_chunked_buffers,", "result == HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise", "\"\"\"Exercise AutomaticPipeline's clock functionality.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_delayed:') pipeline = 
make_pipeline_delayed(AutomaticPipeline)", "with directional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync_down() is", "StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink(), TopLoopbackLink() ) assert isinstance(manual_pipeline.bottom, StreamLink) assert", "== LOWER_CHUNKED_STREAM def assert_loopback_below(stack, payload): \"\"\"Asset that the stack has correct below-loopback behavior.\"\"\"", "make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0) == 1.0 assert not", "make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert", "print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync() is None result = manual_pipeline.bottom.to_write() print('Pipeline bottom wrote to", "pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5)", "import ChunkedStreamLink from phylline.links.loopback import TopLoopbackLink from phylline.links.streams import StreamLink from phylline.pipelines import", "pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked():", "wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on pipeline write_bottom_chunked_buffers(automatic_pipeline)", "assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) 
assert pipeline.update_clock(0) == 1.0 assert", "# Read/write on links write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote", "stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's clock functionality.\"\"\" print('Testing", "]) def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise AutomaticPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory {}:'.format(pipeline_factory.__name__)) automatic_pipeline", "make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(), EventLink()), make_pipeline_events(pipeline_type) ) return pipeline @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long,", "make_pipeline_delayed(ManualPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() == 1.0 assert not", "None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync() is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote", "print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked(): \"\"\"Exercise", ") assert isinstance(pipeline.bottom, EventLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_nested(pipeline_type): \"\"\"Make a", "print('Testing Manual Pipeline with factory {}:'.format(pipeline_factory.__name__)) pipeline = pipeline_factory(ManualPipeline) print(pipeline) # Read/write on", "print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload) assert_loopback_below(coupler.pipeline_one, payload) def 
test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test", "pipeline.update_clock(0) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.top.has_receive()", "{}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on pipeline write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result", "assert result == HIGHER_CHUNKED_STREAM # Read/write on links with bidirectional sync write_bottom_chunked_buffers(pipeline.bottom) assert", "stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on pipeline write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline)", "\"\"\"Make a singular pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink() ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert", "= pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ])", "ChunkedStreamLink) return pipeline def make_pipeline_short(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink(),", "make_pipeline_loopback:') automatic_pipeline = make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to", "print('Testing Manual Pipeline with factory make_pipeline_loopback:') manual_pipeline = make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync()", "pipeline = pipeline_factory(ManualPipeline) print(pipeline) # Read/write on links with directional sync write_bottom_chunked_buffers(pipeline.bottom) assert", "isinstance(pipeline.bottom, 
StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_delayed(pipeline_type): \"\"\"Make a short pipeline.\"\"\"", "assert result == HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's clock functionality.\"\"\" print('Testing Manual Pipeline", "return pipeline def make_pipeline_delayed(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), ChunkedStreamLink(),", "import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from tests.unit.pipes import ( assert_bottom_events, write_bottom_chunked_buffers, write_top_events ) def make_pipeline_singular(pipeline_type):", "make_pipeline_loopback(pipeline_factory): \"\"\"Make a long pipeline with a loopback at the top.\"\"\" manual_pipeline =", "a long pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink()", "make_pipeline_events(pipeline_type) ) return pipeline @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_manual_pipeline(pipeline_factory):", "assert pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory', [", "manual_pipeline = make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync() is None result = manual_pipeline.bottom.to_write() print('Pipeline", "pipeline_type( StreamLink(), ChunkedStreamLink(), DelayedEventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink)", "StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink)", "with factory 
{}:'.format(pipeline_factory.__name__)) pipeline = pipeline_factory(ManualPipeline) print(pipeline) # Read/write on links with directional", "module.\"\"\" # Builtins # Packages from phylline.links.clocked import DelayedEventLink from phylline.links.events import EventLink", "EventLink) return pipeline def make_pipeline_nested(pipeline_type): \"\"\"Make a nested pipeline.\"\"\" pipeline = pipeline_type( make_pipeline_singular(pipeline_type),", "@pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise AutomaticPipeline's interface.\"\"\" print('Testing", "\"\"\"Make a nested pipeline.\"\"\" pipeline = pipeline_type( make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(), EventLink()), make_pipeline_events(pipeline_type)", "pipeline_one = make_pipeline_events(AutomaticPipeline) pipeline_two = AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink() ) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two)", "None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0", "pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular,", "LOWER_CHUNKED_STREAM def assert_loopback_below(stack, payload): \"\"\"Asset that the stack has correct below-loopback behavior.\"\"\" stack.send(payload)", "print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback(): \"\"\"Exercise", "isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_long(pipeline_type): \"\"\"Make a long pipeline.\"\"\"", 
"EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_delayed(pipeline_type):", "{}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on pipe with bidirectionl sync write_bottom_chunked_buffers(pipeline)", "to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on pipeline write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline)", "None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0", "print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def assert_loopback_below(stack, payload):", "tests.unit.pipes import ( assert_bottom_events, write_bottom_chunked_buffers, write_top_events ) def make_pipeline_singular(pipeline_type): \"\"\"Make a singular pipeline.\"\"\"", "ChunkedStreamLink) assert isinstance(pipeline.top, ChunkedStreamLink) return pipeline def make_pipeline_short(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline", "EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, EventLink) assert isinstance(pipeline.top, EventLink) return pipeline def", "make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline", "# Read/write on links with bidirectional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() is None assert_bottom_events(pipeline.top)", "print(pipeline) # Read/write on links with directional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up() is None", "assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload) assert_loopback_below(coupler.pipeline_one, 
payload) def test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for pipeline bottom coupling", "{}'.format(result)) assert result == LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Automatic Pipeline", "pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.update_clock(0) == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert", "assert_bottom_events(pipeline) write_top_events(pipeline) assert pipeline.sync() is None result = pipeline.to_write() print('Pipeline bottom wrote to", "coupling on events.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_events(AutomaticPipeline) pipeline_two", "result == HIGHER_CHUNKED_STREAM # Read/write on pipeline write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result = automatic_pipeline.to_write()", "StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return", "print('Loopback received: {}'.format(result)) assert result.data == payload def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for pipeline bottom", "coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload) assert_loopback_below(coupler.pipeline_one,", "None write_top_events(pipeline.top) assert pipeline.sync() == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write()", "StreamLink(), StreamLink(), ChunkedStreamLink(), EventLink(), EventLink(), EventLink(), TopLoopbackLink() ) assert isinstance(manual_pipeline.bottom, StreamLink) assert isinstance(manual_pipeline.top,", "a nested pipeline.\"\"\" pipeline = pipeline_type( 
make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(), EventLink()), make_pipeline_events(pipeline_type) )", "a events-only pipeline.\"\"\" pipeline = pipeline_type( EventLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom,", "write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync_down() is None result =", "a short pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink(), EventLink(), ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert", "\"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory {}:'.format(pipeline_factory.__name__)) pipeline = pipeline_factory(ManualPipeline) print(pipeline)", "factory {}:'.format(pipeline_factory.__name__)) pipeline = pipeline_factory(ManualPipeline) print(pipeline) # Read/write on links with directional sync", "assert pipeline.sync() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync() is None result = pipeline.bottom.to_write()", "interface.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_loopback:') automatic_pipeline = make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom) result", "factory make_pipeline_loopback:') automatic_pipeline = make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote", "== 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top) print('Resetting clock...') assert", "\"\"\"Exercise ManualPipeline's clock functionality.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_delayed:') pipeline = 
make_pipeline_delayed(ManualPipeline)", "wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on links with", "bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's", "assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_long(pipeline_type): \"\"\"Make a long pipeline.\"\"\" pipeline =", "from tests.unit.pipes import ( assert_bottom_events, write_bottom_chunked_buffers, write_top_events ) def make_pipeline_singular(pipeline_type): \"\"\"Make a singular", "def make_pipeline_short(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink(), EventLink(), ) assert", "pipeline.sync_up() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync_down() is None result = pipeline.bottom.to_write() print('Pipeline", "factory make_pipeline_loopback:') manual_pipeline = make_pipeline_loopback(ManualPipeline) print(manual_pipeline) write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync() is None result =", "assert pipeline.sync() is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result))", "write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync() is None result =", "None assert_bottom_events(pipeline) write_top_events(pipeline) assert pipeline.sync() is None result = pipeline.to_write() print('Pipeline bottom wrote", "print('Testing Manual Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(ManualPipeline) print(pipeline) assert pipeline.update_clock(0) is", "is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync_down() is None result = pipeline.bottom.to_write() print('Pipeline bottom", 
"write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert", "\"\"\"Make a events-only pipeline.\"\"\" pipeline = pipeline_type( EventLink(), EventLink(), EventLink(), EventLink() ) assert", "pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload) assert_loopback_below(coupler.pipeline_one, payload) def test_loopback_pipeline_bottom_coupler_event():", "assert pipeline.sync_down() is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result))", "print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_events(AutomaticPipeline) pipeline_two = AutomaticPipeline( make_pipeline_events(AutomaticPipeline),", "pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5)", "pipeline_type( make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(), EventLink()), make_pipeline_events(pipeline_type) ) return pipeline @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular,", "make_pipeline_events(AutomaticPipeline) pipeline_two = AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink() ) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload", "pipeline.top.has_receive() assert pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top)", "long pipeline with a loopback at the top.\"\"\" manual_pipeline = pipeline_factory( StreamLink(), StreamLink(),", "interface.\"\"\" print('Testing Automatic Pipeline with factory {}:'.format(pipeline_factory.__name__)) 
automatic_pipeline = pipeline_factory(AutomaticPipeline) print(automatic_pipeline) # Read/write", "stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on pipe with bidirectionl sync", "pipeline_type( ChunkedStreamLink(), EventLink(), ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def", "return pipeline def make_pipeline_events(pipeline_type): \"\"\"Make a events-only pipeline.\"\"\" pipeline = pipeline_type( EventLink(), EventLink(),", "def test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory {}:'.format(pipeline_factory.__name__)) pipeline =", "= pipeline_type( EventLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, EventLink) assert isinstance(pipeline.top, EventLink)", "not pipeline.top.has_receive() assert pipeline.update_clock(0.99) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(1.0) is None", "assert result.data == payload def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for pipeline bottom coupling on streams.\"\"\"", "interface.\"\"\" print('Testing Manual Pipeline with factory {}:'.format(pipeline_factory.__name__)) pipeline = pipeline_factory(ManualPipeline) print(pipeline) # Read/write", "pipeline.\"\"\" pipeline = pipeline_type( make_pipeline_singular(pipeline_type), make_pipeline_events(pipeline_type), EventLink(), AutomaticPipe(EventLink(), EventLink()), make_pipeline_events(pipeline_type) ) return pipeline", "make_pipeline_nested ]) def test_manual_pipeline(pipeline_factory): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory {}:'.format(pipeline_factory.__name__))", "print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on", "# Read/write on pipeline write_bottom_chunked_buffers(automatic_pipeline) 
assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result = automatic_pipeline.to_write() print('Pipeline bottom wrote", "result = pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM", "wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\"", "print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) assert pipeline.sync() == 1.0 assert pipeline.update_clock(0.5)", "write_bottom_chunked_buffers(automatic_pipeline.bottom) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result ==", "\"\"\"Asset that the stack has correct below-loopback behavior.\"\"\" stack.send(payload) assert stack.has_receive() result =", "None result = manual_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result ==", "pipeline_two = make_pipeline_loopback(AutomaticPipeline) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload)", "1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(1.5)", "assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_delayed(pipeline_type): \"\"\"Make a short", "LOWER_CHUNKED_STREAM from tests.unit.pipes import ( assert_bottom_events, write_bottom_chunked_buffers, write_top_events ) def make_pipeline_singular(pipeline_type): \"\"\"Make a", "PipelineBottomCoupler...') pipeline_one = make_pipeline_nested(AutomaticPipeline) pipeline_two = make_pipeline_loopback(AutomaticPipeline) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload", "= 
pipeline_factory(ManualPipeline) print(pipeline) # Read/write on links with directional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up()", "short pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), ChunkedStreamLink(), DelayedEventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom,", "== HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory): \"\"\"Make a long pipeline with a loopback at the", "def test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_loopback:') manual_pipeline =", "EventLink()), make_pipeline_events(pipeline_type) ) return pipeline @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def", "= make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result))", "functionality.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(ManualPipeline) print(pipeline) assert pipeline.update_clock(0)", "= make_pipeline_loopback(AutomaticPipeline) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one,", "pipeline = make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0) == 1.0", "import ( assert_bottom_events, write_bottom_chunked_buffers, write_top_events ) def make_pipeline_singular(pipeline_type): \"\"\"Make a singular pipeline.\"\"\" pipeline", "b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload) 
assert_loopback_below(coupler.pipeline_one, payload) def test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for pipeline bottom", "clock functionality.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert", "for pipeline bottom coupling on events.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one", "result == HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's clock functionality.\"\"\" print('Testing Manual Pipeline with", "automatic_pipeline = make_pipeline_loopback(AutomaticPipeline) print(automatic_pipeline) write_bottom_chunked_buffers(automatic_pipeline.bottom) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream:", "phylline.pipelines import AutomaticPipeline, ManualPipeline, PipelineBottomCoupler from phylline.pipes import AutomaticPipe import pytest from tests.unit.links.links", "== 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75) == 1.0 assert not pipeline.bottom.to_write() assert", "to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on links with bidirectional", "phylline.pipes import AutomaticPipe import pytest from tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from tests.unit.pipes import", "\"\"\"Test for pipeline bottom coupling on events.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...')", "is None write_top_events(pipeline.top) assert pipeline.sync() == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not", "ManualPipeline's clock functionality.\"\"\" print('Testing Manual Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(ManualPipeline) print(pipeline)", "is None assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is None write_top_events(pipeline.top) 
assert pipeline.sync() ==", ") return pipeline @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_manual_pipeline(pipeline_factory): \"\"\"Exercise", "== 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(0.75) == 1.0", "make_pipeline_delayed(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), ChunkedStreamLink(), DelayedEventLink(), EventLink(), EventLink()", "== HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's clock functionality.\"\"\" print('Testing Automatic Pipeline with factory", "StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_delayed(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline", "isinstance(pipeline.top, ChunkedStreamLink) return pipeline def make_pipeline_short(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type(", "bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_automatic_pipeline_clocked(): \"\"\"Exercise AutomaticPipeline's", "{}'.format(result)) assert result == LOWER_CHUNKED_STREAM def assert_loopback_below(stack, payload): \"\"\"Asset that the stack has", "pipeline.sync() is None result = pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert", "pipeline_type( ChunkedStreamLink() ) assert isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, ChunkedStreamLink) return pipeline def make_pipeline_short(pipeline_type):", "\"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), ChunkedStreamLink(), DelayedEventLink(), EventLink(), EventLink() )", "def make_pipeline_singular(pipeline_type): \"\"\"Make a singular pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink() ) assert isinstance(pipeline.bottom,", "sync 
write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync() is None result", "pipeline.sync() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync() is None result = pipeline.bottom.to_write() print('Pipeline", "PipelineBottomCoupler from phylline.pipes import AutomaticPipe import pytest from tests.unit.links.links import HIGHER_CHUNKED_STREAM, LOWER_CHUNKED_STREAM from", "stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def assert_loopback_below(stack, payload): \"\"\"Asset that the stack", "pipeline.update_clock(0.75) == 1.0 assert not pipeline.bottom.to_write() assert pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write()", "== HIGHER_CHUNKED_STREAM # Read/write on pipe with bidirectionl sync write_bottom_chunked_buffers(pipeline) assert pipeline.sync() is", "assert_loopback_below(stack, payload): \"\"\"Asset that the stack has correct below-loopback behavior.\"\"\" stack.send(payload) assert stack.has_receive()", "pipeline.sync_down() is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert", "== HIGHER_CHUNKED_STREAM # Read/write on pipeline write_bottom_chunked_buffers(automatic_pipeline) assert_bottom_events(automatic_pipeline) write_top_events(automatic_pipeline) result = automatic_pipeline.to_write() print('Pipeline", "pipeline = pipeline_type( EventLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, EventLink) assert isinstance(pipeline.top,", "write_bottom_chunked_buffers(manual_pipeline.bottom) assert manual_pipeline.sync() is None result = manual_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream:", "payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload) assert_loopback_below(coupler.pipeline_one, payload) def 
test_loopback_pipeline_bottom_coupler_event(): \"\"\"Test for", "loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_events(AutomaticPipeline) pipeline_two = AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink() ) coupler", "not pipeline.top.has_receive() assert pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0) is None", "with a loopback at the top.\"\"\" manual_pipeline = pipeline_factory( StreamLink(), StreamLink(), StreamLink(), ChunkedStreamLink(),", "assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_events(pipeline_type): \"\"\"Make a events-only", "stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on links with bidirectional sync", "pipe with bidirectionl sync write_bottom_chunked_buffers(pipeline) assert pipeline.sync() is None assert_bottom_events(pipeline) write_top_events(pipeline) assert pipeline.sync()", "= AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink() ) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4'", "EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_events(pipeline_type):", "is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) ==", "= make_pipeline_nested(AutomaticPipeline) pipeline_two = make_pipeline_loopback(AutomaticPipeline) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4'", "with PipelineBottomCoupler...') pipeline_one = make_pipeline_events(AutomaticPipeline) pipeline_two = AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink() ) coupler =", "def 
make_pipeline_delayed(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( StreamLink(), ChunkedStreamLink(), DelayedEventLink(), EventLink(),", "{}'.format(result)) assert result == HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's clock functionality.\"\"\" print('Testing Manual", "isinstance(pipeline.bottom, ChunkedStreamLink) assert isinstance(pipeline.top, ChunkedStreamLink) return pipeline def make_pipeline_short(pipeline_type): \"\"\"Make a short pipeline.\"\"\"", "== HIGHER_CHUNKED_STREAM def test_manual_pipeline_clocked(): \"\"\"Exercise ManualPipeline's clock functionality.\"\"\" print('Testing Manual Pipeline with factory", "pipeline def make_pipeline_short(pipeline_type): \"\"\"Make a short pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink(), EventLink(), )", "ChunkedStreamLink(), EventLink(), EventLink(), EventLink() ) assert isinstance(pipeline.bottom, StreamLink) assert isinstance(pipeline.top, EventLink) return pipeline", "ChunkedStreamLink(), EventLink(), EventLink(), EventLink(), TopLoopbackLink() ) assert isinstance(manual_pipeline.bottom, StreamLink) assert isinstance(manual_pipeline.top, TopLoopbackLink) return", "assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_nested(pipeline_type): \"\"\"Make a nested pipeline.\"\"\" pipeline =", "result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM", "EventLink(), EventLink(), EventLink(), TopLoopbackLink() ) assert isinstance(manual_pipeline.bottom, StreamLink) assert isinstance(manual_pipeline.top, TopLoopbackLink) return manual_pipeline", "assert pipeline.sync() == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert not", "pipeline_factory(AutomaticPipeline) print(automatic_pipeline) # Read/write on links write_bottom_chunked_buffers(automatic_pipeline.bottom) 
assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) result = automatic_pipeline.bottom.to_write() print('Pipeline", "write_top_events(pipeline.top) assert pipeline.sync_down() is None result = pipeline.bottom.to_write() print('Pipeline bottom wrote to stream:", "write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) result = automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert", "result == HIGHER_CHUNKED_STREAM # Read/write on links with bidirectional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync()", "# Read/write on links with directional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up() is None assert_bottom_events(pipeline.top)", "directional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync_up() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top) assert pipeline.sync_down() is None", "== HIGHER_CHUNKED_STREAM @pytest.mark.parametrize('pipeline_factory', [ make_pipeline_singular, make_pipeline_short, make_pipeline_long, make_pipeline_nested ]) def test_automatic_pipeline(pipeline_factory): \"\"\"Exercise AutomaticPipeline's", "write_top_events(automatic_pipeline) result = automatic_pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result ==", "DelayedEventLink from phylline.links.events import EventLink from phylline.links.links import ChunkedStreamLink from phylline.links.loopback import TopLoopbackLink", "pipeline.bottom.to_write() assert pipeline.update_clock(1.5) is None result = pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM def", "to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM # Read/write on pipe with bidirectionl", "with PipelineBottomCoupler...') pipeline_one = make_pipeline_nested(AutomaticPipeline) 
pipeline_two = make_pipeline_loopback(AutomaticPipeline) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler)", "assert pipeline.update_clock(0) == 1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(0.5) == 1.0 assert not", "= make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert pipeline.update_clock(0) is None write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.update_clock(0) == 1.0 assert", "below-loopback behavior.\"\"\" stack.send(payload) assert stack.has_receive() result = stack.receive() print('Loopback received: {}'.format(result)) assert result.data", "= stack.receive() print('Loopback received: {}'.format(result)) assert result.data == payload def test_loopback_pipeline_bottom_coupler_stream(): \"\"\"Test for", "pipeline_two = AutomaticPipeline( make_pipeline_events(AutomaticPipeline), TopLoopbackLink() ) coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload =", "EventLink from phylline.links.links import ChunkedStreamLink from phylline.links.loopback import TopLoopbackLink from phylline.links.streams import StreamLink", "Read/write on links with bidirectional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() is None assert_bottom_events(pipeline.top) write_top_events(pipeline.top)", "1.0 assert not pipeline.top.has_receive() assert pipeline.update_clock(1.0) is None assert_bottom_events(pipeline.top) print('Resetting clock...') assert pipeline.update_clock(0)", ") coupler = PipelineBottomCoupler(pipeline_one, pipeline_two) print(coupler) payload = b'\\1\\2\\3\\4' assert_loopback_below(pipeline_one.top, payload) assert_loopback_below(pipeline_one, payload)", "= pipeline.bottom.to_write() assert result == HIGHER_CHUNKED_STREAM def make_pipeline_loopback(pipeline_factory): \"\"\"Make a long pipeline with", "TopLoopbackLink() ) assert isinstance(manual_pipeline.bottom, StreamLink) assert isinstance(manual_pipeline.top, TopLoopbackLink) return 
manual_pipeline def test_manual_pipeline_loopback(): \"\"\"Exercise", "None result = pipeline.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result ==", "is None write_top_events(pipeline.top) assert pipeline.update_clock(0) == 1.0 assert pipeline.update_clock(0.5) == 1.0 assert not", "to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing", "phylline.links.links import ChunkedStreamLink from phylline.links.loopback import TopLoopbackLink from phylline.links.streams import StreamLink from phylline.pipelines", "= automatic_pipeline.bottom.to_write() print('Pipeline bottom wrote to stream: {}'.format(result)) assert result == HIGHER_CHUNKED_STREAM #", "EventLink) assert isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_nested(pipeline_type): \"\"\"Make a nested pipeline.\"\"\" pipeline", "assert pipeline.sync() is None assert_bottom_events(pipeline) write_top_events(pipeline) assert pipeline.sync() is None result = pipeline.to_write()", "StreamLink) assert isinstance(manual_pipeline.top, TopLoopbackLink) return manual_pipeline def test_manual_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Manual", "coupling on streams.\"\"\" print('Testing byte buffer loopback with PipelineBottomCoupler...') pipeline_one = make_pipeline_nested(AutomaticPipeline) pipeline_two", "{}:'.format(pipeline_factory.__name__)) automatic_pipeline = pipeline_factory(AutomaticPipeline) print(automatic_pipeline) # Read/write on links write_bottom_chunked_buffers(automatic_pipeline.bottom) assert_bottom_events(automatic_pipeline.top) write_top_events(automatic_pipeline.top) result", "LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's interface.\"\"\" print('Testing Automatic Pipeline with factory make_pipeline_loopback:') automatic_pipeline", "bottom 
wrote to stream: {}'.format(result)) assert result == LOWER_CHUNKED_STREAM def test_automatic_pipeline_loopback(): \"\"\"Exercise ManualPipeline's", "write_bottom_chunked_buffers, write_top_events ) def make_pipeline_singular(pipeline_type): \"\"\"Make a singular pipeline.\"\"\" pipeline = pipeline_type( ChunkedStreamLink()", "== HIGHER_CHUNKED_STREAM # Read/write on links with bidirectional sync write_bottom_chunked_buffers(pipeline.bottom) assert pipeline.sync() is", "from phylline.links.clocked import DelayedEventLink from phylline.links.events import EventLink from phylline.links.links import ChunkedStreamLink from", "Automatic Pipeline with factory make_pipeline_delayed:') pipeline = make_pipeline_delayed(AutomaticPipeline) print(pipeline) assert pipeline.update_clock(0) is None", "isinstance(pipeline.top, EventLink) return pipeline def make_pipeline_events(pipeline_type): \"\"\"Make a events-only pipeline.\"\"\" pipeline = pipeline_type(" ]
[ "as schema class TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self): nullstring = utils.safestring(None) self.assertFalse(nullstring) def test_safe_string_other(self): astring", "unittest import datetime import backend.fcmutils as utils import messaging.schema as schema class TestUtilityFunctions(unittest.TestCase):", "import messaging.schema as schema class TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self): nullstring = utils.safestring(None) self.assertFalse(nullstring) def", "los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) self.assertTrue(len(los) > 0) def test_deserializelist_string(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}']", "def test_deserializelist_string(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelistofstrings(thelist, schema.MinerInfoSchema()) self.assertTrue(len(los) > 0)", "as utils import messaging.schema as schema class TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self): nullstring = utils.safestring(None)", "def test_deserializelist(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) self.assertTrue(len(los) > 0)", "= ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) self.assertTrue(len(los) > 0) def test_deserializelist_string(self): thelist", "datetime import backend.fcmutils as utils import messaging.schema as schema class TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self):", "import datetime import backend.fcmutils as utils import messaging.schema as schema class TestUtilityFunctions(unittest.TestCase): def", "messaging.schema as schema class TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self): nullstring = utils.safestring(None) self.assertFalse(nullstring) def 
test_safe_string_other(self):", "self.assertTrue(len(los) > 0) def test_deserializelist_string(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelistofstrings(thelist, schema.MinerInfoSchema())", "import unittest import datetime import backend.fcmutils as utils import messaging.schema as schema class", "import backend.fcmutils as utils import messaging.schema as schema class TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self): nullstring", "def test_safe_string_null(self): nullstring = utils.safestring(None) self.assertFalse(nullstring) def test_safe_string_other(self): astring = utils.safestring(b'test') self.assertTrue(astring) def", "utils.safestring(b'test') self.assertTrue(astring) def test_formattime(self): dtnow = utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def test_deserializelist(self): thelist = ['{\"miner_type\":\"test\",", "def test_safe_string_other(self): astring = utils.safestring(b'test') self.assertTrue(astring) def test_formattime(self): dtnow = utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def", "backend.fcmutils as utils import messaging.schema as schema class TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self): nullstring =", "self.assertTrue(dtnow) def test_deserializelist(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) self.assertTrue(len(los) >", "> 0) def test_deserializelist_string(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelistofstrings(thelist, schema.MinerInfoSchema()) self.assertTrue(len(los)", "nullstring = utils.safestring(None) self.assertFalse(nullstring) def test_safe_string_other(self): astring = utils.safestring(b'test') self.assertTrue(astring) def test_formattime(self): dtnow", "= utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) 
self.assertTrue(len(los) > 0) def test_deserializelist_string(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los", "class TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self): nullstring = utils.safestring(None) self.assertFalse(nullstring) def test_safe_string_other(self): astring = utils.safestring(b'test')", "utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def test_deserializelist(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) self.assertTrue(len(los)", "self.assertTrue(astring) def test_formattime(self): dtnow = utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def test_deserializelist(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}']", "utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) self.assertTrue(len(los) > 0) def test_deserializelist_string(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los =", "self.assertFalse(nullstring) def test_safe_string_other(self): astring = utils.safestring(b'test') self.assertTrue(astring) def test_formattime(self): dtnow = utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow)", "astring = utils.safestring(b'test') self.assertTrue(astring) def test_formattime(self): dtnow = utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def test_deserializelist(self): thelist", "utils import messaging.schema as schema class TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self): nullstring = utils.safestring(None) self.assertFalse(nullstring)", "= utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def test_deserializelist(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist)", "test_safe_string_null(self): nullstring = utils.safestring(None) self.assertFalse(nullstring) 
def test_safe_string_other(self): astring = utils.safestring(b'test') self.assertTrue(astring) def test_formattime(self):", "\"minerid\":\"test\"}'] los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) self.assertTrue(len(los) > 0) def test_deserializelist_string(self): thelist = ['{\"miner_type\":\"test\",", "thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) self.assertTrue(len(los) > 0) def test_deserializelist_string(self):", "test_deserializelist(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) self.assertTrue(len(los) > 0) def", "['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelist_withschema(schema.MinerInfoSchema(), thelist) self.assertTrue(len(los) > 0) def test_deserializelist_string(self): thelist =", "<filename>fullcyclepy/tests/test_utils.py<gh_stars>10-100 import unittest import datetime import backend.fcmutils as utils import messaging.schema as schema", "utils.safestring(None) self.assertFalse(nullstring) def test_safe_string_other(self): astring = utils.safestring(b'test') self.assertTrue(astring) def test_formattime(self): dtnow = utils.formattime(datetime.datetime.now())", "test_formattime(self): dtnow = utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def test_deserializelist(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los =", "test_safe_string_other(self): astring = utils.safestring(b'test') self.assertTrue(astring) def test_formattime(self): dtnow = utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def test_deserializelist(self):", "= utils.safestring(None) self.assertFalse(nullstring) def test_safe_string_other(self): astring = utils.safestring(b'test') self.assertTrue(astring) def test_formattime(self): dtnow =", "def test_formattime(self): dtnow = 
utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def test_deserializelist(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los", "dtnow = utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def test_deserializelist(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelist_withschema(schema.MinerInfoSchema(),", "0) def test_deserializelist_string(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelistofstrings(thelist, schema.MinerInfoSchema()) self.assertTrue(len(los) >", "schema class TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self): nullstring = utils.safestring(None) self.assertFalse(nullstring) def test_safe_string_other(self): astring =", "thelist) self.assertTrue(len(los) > 0) def test_deserializelist_string(self): thelist = ['{\"miner_type\":\"test\", \"minerid\":\"test\"}'] los = utils.deserializelistofstrings(thelist,", "= utils.safestring(b'test') self.assertTrue(astring) def test_formattime(self): dtnow = utils.formattime(datetime.datetime.now()) self.assertTrue(dtnow) def test_deserializelist(self): thelist =", "TestUtilityFunctions(unittest.TestCase): def test_safe_string_null(self): nullstring = utils.safestring(None) self.assertFalse(nullstring) def test_safe_string_other(self): astring = utils.safestring(b'test') self.assertTrue(astring)" ]
[ "\"\"\" Sets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or attributes of the query", "None self._source_name = None self._query_results_grouped_by = None self._query_results_ordered_by = None self._time_series_interval_in_mins = None", "than one OrderedBy summary object, if the sort was multidimensional. :return: The query_results_ordered_by", "__repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return", "arguments. The following keyword arguments are supported (corresponding to the getters/setters of this", "are group by values. This is a list of ResultsGroupedBy summary objects, and", "QueryResultMetadataSummary object with values from keyword arguments. The following keyword arguments are supported", "of this QueryResultMetadataSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The value to assign to", "of this QueryResultMetadataSummary. Source of the query result set (traces, spans, etc). :return:", "by which the query results are organized. This is a list of queryResultsOrderedBy", "QueryResultMetadataSummary. Interval for the time series function in minutes. :return: The time_series_interval_in_mins of", "QueryResultMetadataSummary. :rtype: int \"\"\" return self._time_series_interval_in_mins @time_series_interval_in_mins.setter def time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\" Sets the", "function in minutes. :return: The time_series_interval_in_mins of this QueryResultMetadataSummary. 
:rtype: int \"\"\" return", "License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown", "= { 'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name': 'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins' }", "multidimensional. :param query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by =", "select query. :return: The query_results_grouped_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return self._query_results_grouped_by", "of this QueryResultMetadataSummary. Interval for the time series function in minutes. :param time_series_interval_in_mins:", "\"\"\" Sets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series function", "list will contain as many elements as the attributes and aggregate functions in", "'source_name': 'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins' } self._query_result_row_type_summaries = None self._source_name", "Sets the source_name of this QueryResultMetadataSummary. Source of the query result set (traces,", "QueryResultRowData map. :param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries", "\"\"\" Gets the query_results_grouped_by of this QueryResultMetadataSummary. 
Columns or attributes of the query", "list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The value to assign to the time_series_interval_in_mins property of this", "as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You", "one OrderedBy summary object, if the sort was multidimensional. :param query_results_ordered_by: The query_results_ordered_by", "the attributes and aggregate functions in the group by clause in the select", "this QueryResultMetadataSummary. :type: int \"\"\" self._time_series_interval_in_mins = time_series_interval_in_mins def __repr__(self): return formatted_flat_dict(self) def", "query_result_row_type_summaries @property def source_name(self): \"\"\" Gets the source_name of this QueryResultMetadataSummary. Source of", "NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultMetadataSummary(object): \"\"\"", "this QueryResultMetadataSummary. Interval for the time series function in minutes. :param time_series_interval_in_mins: The", "source_name(self, source_name): \"\"\" Sets the source_name of this QueryResultMetadataSummary. Source of the query", "this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by = query_results_ordered_by @property def time_series_interval_in_mins(self): \"\"\" Gets", "and properties of the individual row elements of the query rows being returned.", "formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultMetadataSummary(object):", "query result set. 
\"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new QueryResultMetadataSummary object", "= None self._time_series_interval_in_mins = None @property def query_result_row_type_summaries(self): \"\"\" Gets the query_result_row_type_summaries of", "\"\"\" self._query_results_grouped_by = query_results_grouped_by @property def query_results_ordered_by(self): \"\"\" Gets the query_results_ordered_by of this", "Gets the source_name of this QueryResultMetadataSummary. Source of the query result set (traces,", "OrderedBy summary object, if the sort was multidimensional. :return: The query_results_ordered_by of this", "of this QueryResultMetadataSummary. :type: int \"\"\" self._time_series_interval_in_mins = time_series_interval_in_mins def __repr__(self): return formatted_flat_dict(self)", "query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The value to assign to the time_series_interval_in_mins property of", "This is a list of queryResultsOrderedBy summary objects, and the list will contain", "the list will contain more than one OrderedBy summary object, if the sort", "ith element in this list contains the QueryResultRowTypeSummary of the ith key value", "self._source_name @source_name.setter def source_name(self, source_name): \"\"\" Sets the source_name of this QueryResultMetadataSummary. Source", "self._time_series_interval_in_mins = None @property def query_result_row_type_summaries(self): \"\"\" Gets the query_result_row_type_summaries of this QueryResultMetadataSummary.", ":type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries = query_result_row_type_summaries @property def source_name(self): \"\"\" Gets the source_name", ":return: The query_result_row_type_summaries of this QueryResultMetadataSummary. 
:rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def", "(corresponding to the getters/setters of this class): :param query_result_row_type_summaries: The value to assign", "list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return self._query_results_ordered_by @query_results_ordered_by.setter def query_results_ordered_by(self, query_results_ordered_by): \"\"\" Sets the query_results_ordered_by of", "@query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\" Sets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection", "(UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at", "other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return", "= None self._query_results_ordered_by = None self._time_series_interval_in_mins = None @property def query_result_row_type_summaries(self): \"\"\" Gets", "Interval for the time series function in minutes. :param time_series_interval_in_mins: The time_series_interval_in_mins of", "def query_result_row_type_summaries(self): \"\"\" Gets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary", "# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. #", "the ith key value pair in the QueryResultRowData map. :return: The query_result_row_type_summaries of", "of this QueryResultMetadataSummary. :type source_name: str :param query_results_grouped_by: The value to assign to", "Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. 
from", ":type time_series_interval_in_mins: int \"\"\" self.swagger_types = { 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name': 'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]',", "QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map. :return: The", "query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\" Sets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary", "QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\" Sets the", "QueryResultMetadataSummary. Order by which the query results are organized. This is a list", "query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by = query_results_ordered_by @property", "time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary. :type: int \"\"\" self._time_series_interval_in_mins = time_series_interval_in_mins def", "self._time_series_interval_in_mins @time_series_interval_in_mins.setter def time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\" Sets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval", "list contains the QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData", "query_results_ordered_by(self): \"\"\" Gets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which the query", "this QueryResultMetadataSummary. Source of the query result set (traces, spans, etc). :return: The", "of this QueryResultMetadataSummary. 
:rtype: str \"\"\" return self._source_name @source_name.setter def source_name(self, source_name): \"\"\"", "to the getters/setters of this class): :param query_result_row_type_summaries: The value to assign to", "'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins' } self._query_result_row_type_summaries = None self._source_name = None", "\"\"\" return self._query_results_ordered_by @query_results_ordered_by.setter def query_results_ordered_by(self, query_results_ordered_by): \"\"\" Sets the query_results_ordered_by of this", "in minutes. :return: The time_series_interval_in_mins of this QueryResultMetadataSummary. :rtype: int \"\"\" return self._time_series_interval_in_mins", "The query_results_grouped_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by = query_results_grouped_by @property def", "@time_series_interval_in_mins.setter def time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\" Sets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for", "self._query_results_grouped_by = query_results_grouped_by @property def query_results_ordered_by(self): \"\"\" Gets the query_results_ordered_by of this QueryResultMetadataSummary.", "as many elements as the attributes and aggregate functions in the group by", "object, if the sort was multidimensional. :return: The query_results_ordered_by of this QueryResultMetadataSummary. :rtype:", "spans, etc). :return: The source_name of this QueryResultMetadataSummary. :rtype: str \"\"\" return self._source_name", "row elements of the query rows being returned. The ith element in this", "its affiliates. All rights reserved. # This software is dual-licensed to you under", "query_results_grouped_by): \"\"\" Sets the query_results_grouped_by of this QueryResultMetadataSummary. 
Columns or attributes of the", "aggregate functions in the group by clause in the select query. :return: The", "\"\"\" self._query_result_row_type_summaries = query_result_row_type_summaries @property def source_name(self): \"\"\" Gets the source_name of this", "The source_name of this QueryResultMetadataSummary. :type: str \"\"\" self._source_name = source_name @property def", "the time series function in minutes. :param time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary.", "the query_results_ordered_by property of this QueryResultMetadataSummary. :type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The value", "{ 'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name': 'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins' } self._query_result_row_type_summaries", "The query_result_row_type_summaries of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self,", "keyword arguments are supported (corresponding to the getters/setters of this class): :param query_result_row_type_summaries:", "'source_name': 'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int' } self.attribute_map = { 'query_result_row_type_summaries':", ":type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The value to assign to the source_name property", "of this QueryResultMetadataSummary. 
:type time_series_interval_in_mins: int \"\"\" self.swagger_types = { 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name':", "queryResultsOrderedBy summary objects, and the list will contain more than one OrderedBy summary", "Sets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series function in", "are organized. This is a list of queryResultsOrderedBy summary objects, and the list", "The value to assign to the source_name property of this QueryResultMetadataSummary. :type source_name:", "the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary objects that describe the", "QueryResultMetadataSummary. :type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The value to assign to the time_series_interval_in_mins", "result set (traces, spans, etc). :return: The source_name of this QueryResultMetadataSummary. :rtype: str", "of this QueryResultMetadataSummary. Order by which the query results are organized. This is", "The value to assign to the query_results_ordered_by property of this QueryResultMetadataSummary. :type query_results_ordered_by:", "contain more than one OrderedBy summary object, if the sort was multidimensional. :param", "under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache", "by clause in the select query. :return: The query_results_grouped_by of this QueryResultMetadataSummary. :rtype:", "} self.attribute_map = { 'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name': 'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins':", "query_results_grouped_by of this QueryResultMetadataSummary. 
Columns or attributes of the query rows which are", ":type source_name: str :param query_results_grouped_by: The value to assign to the query_results_grouped_by property", "the getters/setters of this class): :param query_result_row_type_summaries: The value to assign to the", "map. :param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries =", "query_results_ordered_by): \"\"\" Sets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which the query", "QueryResultMetadataSummary(object): \"\"\" Summary containing the metadata about the query result set. \"\"\" def", "The value to assign to the query_results_grouped_by property of this QueryResultMetadataSummary. :type query_results_grouped_by:", ":param query_results_ordered_by: The value to assign to the query_results_ordered_by property of this QueryResultMetadataSummary.", "QueryResultMetadataSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The value to assign to the source_name", "'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name': 'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins' } self._query_result_row_type_summaries =", "of queryResultsOrderedBy summary objects, and the list will contain more than one OrderedBy", "The query_results_ordered_by of this QueryResultMetadataSummary. 
:type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by = query_results_ordered_by @property def", ":type: int \"\"\" self._time_series_interval_in_mins = time_series_interval_in_mins def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other):", "values from keyword arguments. The following keyword arguments are supported (corresponding to the", "about the query result set. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new", "elements of the query rows being returned. The ith element in this list", ":type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The value to assign to the time_series_interval_in_mins property", "of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\"", "rows which are group by values. This is a list of ResultsGroupedBy summary", "(c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software", "time series function in minutes. :return: The time_series_interval_in_mins of this QueryResultMetadataSummary. :rtype: int", "a list of queryResultsOrderedBy summary objects, and the list will contain more than", "property of this QueryResultMetadataSummary. :type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The value to assign", ":return: The query_results_ordered_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return self._query_results_ordered_by @query_results_ordered_by.setter def", "coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights", "query rows which are group by values. 
This is a list of ResultsGroupedBy", "series function in minutes. :param time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary. :type: int", "int \"\"\" return self._time_series_interval_in_mins @time_series_interval_in_mins.setter def time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\" Sets the time_series_interval_in_mins of", "property of this QueryResultMetadataSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The value to assign", "the type and properties of the individual row elements of the query rows", "the query result set (traces, spans, etc). :return: The source_name of this QueryResultMetadataSummary.", "The query_results_ordered_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return self._query_results_ordered_by @query_results_ordered_by.setter def query_results_ordered_by(self,", "query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary objects that describe the type", "you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or", "self._query_result_row_type_summaries = query_result_row_type_summaries @property def source_name(self): \"\"\" Gets the source_name of this QueryResultMetadataSummary.", "\"\"\" return self._time_series_interval_in_mins @time_series_interval_in_mins.setter def time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\" Sets the time_series_interval_in_mins of this", "QueryResultMetadataSummary. 
:type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The value to assign to the query_results_ordered_by", "return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__", "object with values from keyword arguments. The following keyword arguments are supported (corresponding", "(traces, spans, etc). :return: The source_name of this QueryResultMetadataSummary. :rtype: str \"\"\" return", "query_results_grouped_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return self._query_results_grouped_by @query_results_grouped_by.setter def query_results_grouped_by(self, query_results_grouped_by):", "@property def query_result_row_type_summaries(self): \"\"\" Gets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of", "2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is", "self._source_name = source_name @property def query_results_grouped_by(self): \"\"\" Gets the query_results_grouped_by of this QueryResultMetadataSummary.", "to assign to the query_results_grouped_by property of this QueryResultMetadataSummary. :type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param", "\"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new QueryResultMetadataSummary object with values from", "@property def query_results_grouped_by(self): \"\"\" Gets the query_results_grouped_by of this QueryResultMetadataSummary. 
Columns or attributes", "list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The value to assign to the query_results_ordered_by property of this", "None self._query_results_ordered_by = None self._time_series_interval_in_mins = None @property def query_result_row_type_summaries(self): \"\"\" Gets the", "\"\"\" Sets the source_name of this QueryResultMetadataSummary. Source of the query result set", "query_result_row_type_summaries): \"\"\" Sets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary objects", "ResultsGroupedBy summary objects, and the list will contain as many elements as the", "= source_name @property def query_results_grouped_by(self): \"\"\" Gets the query_results_grouped_by of this QueryResultMetadataSummary. Columns", "init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultMetadataSummary(object): \"\"\" Summary containing the metadata about the query result", "the select query. :param query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\"", "None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self", "of the ith key value pair in the QueryResultRowData map. :return: The query_result_row_type_summaries", "'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int' } self.attribute_map = { 'query_result_row_type_summaries': 'queryResultRowTypeSummaries',", "def source_name(self, source_name): \"\"\" Sets the source_name of this QueryResultMetadataSummary. Source of the", ":param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary. 
:type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries = query_result_row_type_summaries", "query_result_row_type_summaries property of this QueryResultMetadataSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The value to", "which the query results are organized. This is a list of queryResultsOrderedBy summary", "return self._source_name @source_name.setter def source_name(self, source_name): \"\"\" Sets the source_name of this QueryResultMetadataSummary.", "def query_results_ordered_by(self): \"\"\" Gets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which the", "self._query_results_grouped_by @query_results_grouped_by.setter def query_results_grouped_by(self, query_results_grouped_by): \"\"\" Sets the query_results_grouped_by of this QueryResultMetadataSummary. Columns", "the query rows which are group by values. This is a list of", "import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class", "for the time series function in minutes. :return: The time_series_interval_in_mins of this QueryResultMetadataSummary.", "more than one OrderedBy summary object, if the sort was multidimensional. :param query_results_ordered_by:", "query_result_row_type_summaries(self): \"\"\" Gets the query_result_row_type_summaries of this QueryResultMetadataSummary. 
A collection of QueryResultRowTypeSummary objects", "a list of ResultsGroupedBy summary objects, and the list will contain as many", "'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int' } self.attribute_map = { 'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name': 'sourceName', 'query_results_grouped_by':", "more than one OrderedBy summary object, if the sort was multidimensional. :return: The", "query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The value to assign to the source_name property of", "this QueryResultMetadataSummary. :rtype: int \"\"\" return self._time_series_interval_in_mins @time_series_interval_in_mins.setter def time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\" Sets", "self._query_results_grouped_by = None self._query_results_ordered_by = None self._time_series_interval_in_mins = None @property def query_result_row_type_summaries(self): \"\"\"", "(traces, spans, etc). :param source_name: The source_name of this QueryResultMetadataSummary. :type: str \"\"\"", "source_name: The value to assign to the source_name property of this QueryResultMetadataSummary. :type", "QueryResultRowTypeSummary objects that describe the type and properties of the individual row elements", "QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by = query_results_ordered_by @property def time_series_interval_in_mins(self): \"\"\" Gets the", "by clause in the select query. :param query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary.", "list will contain more than one OrderedBy summary object, if the sort was", "QueryResultMetadataSummary. 
:rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return self._query_results_ordered_by @query_results_ordered_by.setter def query_results_ordered_by(self, query_results_ordered_by): \"\"\" Sets the", "str \"\"\" self._source_name = source_name @property def query_results_grouped_by(self): \"\"\" Gets the query_results_grouped_by of", "value to assign to the query_results_grouped_by property of this QueryResultMetadataSummary. :type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary]", "this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries = query_result_row_type_summaries @property def source_name(self): \"\"\" Gets", "other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self,", "summary objects, and the list will contain more than one OrderedBy summary object,", "QueryResultMetadataSummary. :type time_series_interval_in_mins: int \"\"\" self.swagger_types = { 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name': 'str', 'query_results_grouped_by':", "of this QueryResultMetadataSummary. :type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The value to assign to", "to the time_series_interval_in_mins property of this QueryResultMetadataSummary. :type time_series_interval_in_mins: int \"\"\" self.swagger_types =", "\"\"\" self._query_results_ordered_by = query_results_ordered_by @property def time_series_interval_in_mins(self): \"\"\" Gets the time_series_interval_in_mins of this", ":return: The source_name of this QueryResultMetadataSummary. 
:rtype: str \"\"\" return self._source_name @source_name.setter def", "software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as", "'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins' } self._query_result_row_type_summaries = None self._source_name = None self._query_results_grouped_by", "Gets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series function in", "getters/setters of this class): :param query_result_row_type_summaries: The value to assign to the query_result_row_type_summaries", "to the source_name property of this QueryResultMetadataSummary. :type source_name: str :param query_results_grouped_by: The", ":param query_results_grouped_by: The value to assign to the query_results_grouped_by property of this QueryResultMetadataSummary.", "{ 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name': 'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int' } self.attribute_map", "the individual row elements of the query rows being returned. The ith element", "returned. 
The ith element in this list contains the QueryResultRowTypeSummary of the ith", "return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\" Sets the query_result_row_type_summaries of this QueryResultMetadataSummary.", "self.attribute_map = { 'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name': 'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins'", "str \"\"\" return self._source_name @source_name.setter def source_name(self, source_name): \"\"\" Sets the source_name of", "of this QueryResultMetadataSummary. Interval for the time series function in minutes. :return: The", "QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map. :param query_result_row_type_summaries:", "contain more than one OrderedBy summary object, if the sort was multidimensional. :return:", "QueryResultMetadataSummary. Source of the query result set (traces, spans, etc). :param source_name: The", "object, if the sort was multidimensional. :param query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary.", "http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel #", "query rows being returned. The ith element in this list contains the QueryResultRowTypeSummary", "@property def time_series_interval_in_mins(self): \"\"\" Gets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the", "elements as the attributes and aggregate functions in the group by clause in", "properties of the individual row elements of the query rows being returned. 
The", "query_results_grouped_by(self, query_results_grouped_by): \"\"\" Sets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or attributes of", "is a list of queryResultsOrderedBy summary objects, and the list will contain more", "time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series function in minutes. :param", "'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins' } self._query_result_row_type_summaries = None self._source_name = None self._query_results_grouped_by = None", "int \"\"\" self._time_series_interval_in_mins = time_series_interval_in_mins def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if", "query result set (traces, spans, etc). :return: The source_name of this QueryResultMetadataSummary. :rtype:", "to the query_results_grouped_by property of this QueryResultMetadataSummary. :type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The", "return self._time_series_interval_in_mins @time_series_interval_in_mins.setter def time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\" Sets the time_series_interval_in_mins of this QueryResultMetadataSummary.", "if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other):", "of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return self._query_results_ordered_by @query_results_ordered_by.setter def query_results_ordered_by(self, query_results_ordered_by): \"\"\"", "minutes. :param time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary. 
:type: int \"\"\" self._time_series_interval_in_mins =", "return self._query_results_ordered_by @query_results_ordered_by.setter def query_results_ordered_by(self, query_results_ordered_by): \"\"\" Sets the query_results_ordered_by of this QueryResultMetadataSummary.", "query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The value to assign to the query_results_ordered_by property of", "is a list of ResultsGroupedBy summary objects, and the list will contain as", "query results are organized. This is a list of queryResultsOrderedBy summary objects, and", "set (traces, spans, etc). :param source_name: The source_name of this QueryResultMetadataSummary. :type: str", "Columns or attributes of the query rows which are group by values. This", "query_results_ordered_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return self._query_results_ordered_by @query_results_ordered_by.setter def query_results_ordered_by(self, query_results_ordered_by):", ":param time_series_interval_in_mins: The value to assign to the time_series_interval_in_mins property of this QueryResultMetadataSummary.", "may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401", "the sort was multidimensional. :param query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsOrderedBySummary]", "Sets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary objects that describe", "this QueryResultMetadataSummary. Order by which the query results are organized. This is a", "query. :param query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary. 
:type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by =", "query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries = query_result_row_type_summaries @property", "query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by = query_results_grouped_by @property", "License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util", "containing the metadata about the query result set. \"\"\" def __init__(self, **kwargs): \"\"\"", "source_name of this QueryResultMetadataSummary. :type: str \"\"\" self._source_name = source_name @property def query_results_grouped_by(self):", "for the time series function in minutes. :param time_series_interval_in_mins: The time_series_interval_in_mins of this", "F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultMetadataSummary(object): \"\"\" Summary containing the metadata", "\"\"\" Gets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series function", "query_result_row_type_summaries of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries):", "} self._query_result_row_type_summaries = None self._source_name = None self._query_results_grouped_by = None self._query_results_ordered_by = None", "the QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map. :return:", "the query result set (traces, spans, etc). 
:param source_name: The source_name of this", ":rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return self._query_results_ordered_by @query_results_ordered_by.setter def query_results_ordered_by(self, query_results_ordered_by): \"\"\" Sets the query_results_ordered_by", "of this QueryResultMetadataSummary. :type: str \"\"\" self._source_name = source_name @property def query_results_grouped_by(self): \"\"\"", "list of ResultsGroupedBy summary objects, and the list will contain as many elements", "Sets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or attributes of the query rows", "objects, and the list will contain more than one OrderedBy summary object, if", "was multidimensional. :param query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by", "a new QueryResultMetadataSummary object with values from keyword arguments. The following keyword arguments", ":return: The query_results_grouped_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return self._query_results_grouped_by @query_results_grouped_by.setter def", "contains the QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map.", "time_series_interval_in_mins property of this QueryResultMetadataSummary. :type time_series_interval_in_mins: int \"\"\" self.swagger_types = { 'query_result_row_type_summaries':", "in minutes. :param time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary. :type: int \"\"\" self._time_series_interval_in_mins", "this QueryResultMetadataSummary. Source of the query result set (traces, spans, etc). :param source_name:", "the query result set. 
\"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new QueryResultMetadataSummary", "the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License", "functions in the group by clause in the select query. :param query_results_grouped_by: The", "'time_series_interval_in_mins': 'timeSeriesIntervalInMins' } self._query_result_row_type_summaries = None self._source_name = None self._query_results_grouped_by = None self._query_results_ordered_by", "QueryResultMetadataSummary. :type source_name: str :param query_results_grouped_by: The value to assign to the query_results_grouped_by", "time_series_interval_in_mins): \"\"\" Sets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series", "results are organized. This is a list of queryResultsOrderedBy summary objects, and the", ":return: The time_series_interval_in_mins of this QueryResultMetadataSummary. :rtype: int \"\"\" return self._time_series_interval_in_mins @time_series_interval_in_mins.setter def", "def __init__(self, **kwargs): \"\"\" Initializes a new QueryResultMetadataSummary object with values from keyword", "The time_series_interval_in_mins of this QueryResultMetadataSummary. :type: int \"\"\" self._time_series_interval_in_mins = time_series_interval_in_mins def __repr__(self):", ":param query_result_row_type_summaries: The value to assign to the query_result_row_type_summaries property of this QueryResultMetadataSummary.", "A collection of QueryResultRowTypeSummary objects that describe the type and properties of the", "\"\"\" Summary containing the metadata about the query result set. \"\"\" def __init__(self,", ":param query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary. 
:type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by = query_results_grouped_by", "query_results_ordered_by @property def time_series_interval_in_mins(self): \"\"\" Gets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for", "ith key value pair in the QueryResultRowData map. :param query_result_row_type_summaries: The query_result_row_type_summaries of", "\"\"\" Gets the source_name of this QueryResultMetadataSummary. Source of the query result set", "map. :return: The query_result_row_type_summaries of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return self._query_result_row_type_summaries @query_result_row_type_summaries.setter", "assign to the query_result_row_type_summaries property of this QueryResultMetadataSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name:", "assign to the source_name property of this QueryResultMetadataSummary. :type source_name: str :param query_results_grouped_by:", "the query results are organized. This is a list of queryResultsOrderedBy summary objects,", "objects that describe the type and properties of the individual row elements of", "https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either", "source_name): \"\"\" Sets the source_name of this QueryResultMetadataSummary. Source of the query result", "shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may", "from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters", "QueryResultMetadataSummary. :rtype: str \"\"\" return self._source_name @source_name.setter def source_name(self, source_name): \"\"\" Sets the", "functions in the group by clause in the select query. 
:return: The query_results_grouped_by", "def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__", "'list[QueryResultRowTypeSummary]', 'source_name': 'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int' } self.attribute_map = {", "aggregate functions in the group by clause in the select query. :param query_results_grouped_by:", "value to assign to the source_name property of this QueryResultMetadataSummary. :type source_name: str", "Summary containing the metadata about the query result set. \"\"\" def __init__(self, **kwargs):", "= { 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name': 'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int' }", "key value pair in the QueryResultRowData map. :param query_result_row_type_summaries: The query_result_row_type_summaries of this", "\"\"\" Gets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary objects that", "query_results_grouped_by property of this QueryResultMetadataSummary. :type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The value to", "Gets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary objects that describe", "def time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\" Sets the time_series_interval_in_mins of this QueryResultMetadataSummary. 
Interval for the", "self._query_results_ordered_by = None self._time_series_interval_in_mins = None @property def query_result_row_type_summaries(self): \"\"\" Gets the query_result_row_type_summaries", "'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int' } self.attribute_map = { 'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name': 'sourceName',", "__eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def", ":type: str \"\"\" self._source_name = source_name @property def query_results_grouped_by(self): \"\"\" Gets the query_results_grouped_by", "describe the type and properties of the individual row elements of the query", "from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs", "of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by = query_results_grouped_by @property def query_results_ordered_by(self): \"\"\"", "spans, etc). :param source_name: The source_name of this QueryResultMetadataSummary. :type: str \"\"\" self._source_name", "QueryResultMetadataSummary. Source of the query result set (traces, spans, etc). :return: The source_name", "and aggregate functions in the group by clause in the select query. :return:", ":param time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary. :type: int \"\"\" self._time_series_interval_in_mins = time_series_interval_in_mins", "Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This", "and/or its affiliates. All rights reserved. 
# This software is dual-licensed to you", "\"\"\" self.swagger_types = { 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name': 'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins':", "the QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map. :param", "'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int' } self.attribute_map = { 'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name':", "'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name': 'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int' } self.attribute_map =", "time series function in minutes. :param time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary. :type:", "list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The value to assign to the source_name property of this", "of this QueryResultMetadataSummary. Source of the query result set (traces, spans, etc). :param", "many elements as the attributes and aggregate functions in the group by clause", "return self._query_results_grouped_by @query_results_grouped_by.setter def query_results_grouped_by(self, query_results_grouped_by): \"\"\" Sets the query_results_grouped_by of this QueryResultMetadataSummary.", "= query_result_row_type_summaries @property def source_name(self): \"\"\" Gets the source_name of this QueryResultMetadataSummary. Source", "def query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\" Sets the query_result_row_type_summaries of this QueryResultMetadataSummary. 
A collection of", "clause in the select query. :param query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary. :type:", "\"\"\" Sets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary objects that", "This is a list of ResultsGroupedBy summary objects, and the list will contain", "this QueryResultMetadataSummary. Columns or attributes of the query rows which are group by", "list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by = query_results_ordered_by @property def time_series_interval_in_mins(self): \"\"\" Gets the time_series_interval_in_mins of", "of the ith key value pair in the QueryResultRowData map. :param query_result_row_type_summaries: The", "this QueryResultMetadataSummary. :type: str \"\"\" self._source_name = source_name @property def query_results_grouped_by(self): \"\"\" Gets", "utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.", "is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown", "the sort was multidimensional. :return: The query_results_ordered_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\"", "assign to the query_results_grouped_by property of this QueryResultMetadataSummary. :type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by:", "than one OrderedBy summary object, if the sort was multidimensional. :param query_results_ordered_by: The", "= time_series_interval_in_mins def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None:", "or attributes of the query rows which are group by values. 
This is", "\"\"\" return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\" Sets the query_result_row_type_summaries of this", "as the attributes and aggregate functions in the group by clause in the", "list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by = query_results_grouped_by @property def query_results_ordered_by(self): \"\"\" Gets the query_results_ordered_by of", "collection of QueryResultRowTypeSummary objects that describe the type and properties of the individual", "query_results_grouped_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by = query_results_grouped_by @property def query_results_ordered_by(self):", "You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa:", "query_results_ordered_by of this QueryResultMetadataSummary. Order by which the query results are organized. This", "arguments are supported (corresponding to the getters/setters of this class): :param query_result_row_type_summaries: The", "def time_series_interval_in_mins(self): \"\"\" Gets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time", "time_series_interval_in_mins: The value to assign to the time_series_interval_in_mins property of this QueryResultMetadataSummary. 
:type", "self.swagger_types = { 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name': 'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int'", "self._query_results_ordered_by = query_results_ordered_by @property def time_series_interval_in_mins(self): \"\"\" Gets the time_series_interval_in_mins of this QueryResultMetadataSummary.", "query_result_row_type_summaries: The value to assign to the query_result_row_type_summaries property of this QueryResultMetadataSummary. :type", "Source of the query result set (traces, spans, etc). :param source_name: The source_name", "from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultMetadataSummary(object): \"\"\" Summary containing the metadata about", "self._query_result_row_type_summaries = None self._source_name = None self._query_results_grouped_by = None self._query_results_ordered_by = None self._time_series_interval_in_mins", "value to assign to the query_results_ordered_by property of this QueryResultMetadataSummary. :type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary]", "str :param query_results_grouped_by: The value to assign to the query_results_grouped_by property of this", "was multidimensional. :return: The query_results_ordered_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return self._query_results_ordered_by", "at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose", "in the group by clause in the select query. :param query_results_grouped_by: The query_results_grouped_by", "This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0", "the ith key value pair in the QueryResultRowData map. 
:param query_result_row_type_summaries: The query_result_row_type_summaries", "time_series_interval_in_mins def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return", "property of this QueryResultMetadataSummary. :type time_series_interval_in_mins: int \"\"\" self.swagger_types = { 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]',", "to the query_results_ordered_by property of this QueryResultMetadataSummary. :type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The", "= None self._query_results_grouped_by = None self._query_results_ordered_by = None self._time_series_interval_in_mins = None @property def", "in the QueryResultRowData map. :param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary]", "time_series_interval_in_mins(self): \"\"\" Gets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series", "function in minutes. :param time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary. :type: int \"\"\"", "source_name @property def query_results_grouped_by(self): \"\"\" Gets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or", "are supported (corresponding to the getters/setters of this class): :param query_result_row_type_summaries: The value", "this QueryResultMetadataSummary. Interval for the time series function in minutes. :return: The time_series_interval_in_mins", "contain as many elements as the attributes and aggregate functions in the group", "class): :param query_result_row_type_summaries: The value to assign to the query_result_row_type_summaries property of this", "of this QueryResultMetadataSummary. 
:type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The value to assign to", "of this class): :param query_result_row_type_summaries: The value to assign to the query_result_row_type_summaries property", "sort was multidimensional. :return: The query_results_ordered_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return", "to assign to the source_name property of this QueryResultMetadataSummary. :type source_name: str :param", "def source_name(self): \"\"\" Gets the source_name of this QueryResultMetadataSummary. Source of the query", "value pair in the QueryResultRowData map. :return: The query_result_row_type_summaries of this QueryResultMetadataSummary. :rtype:", "ith key value pair in the QueryResultRowData map. :return: The query_result_row_type_summaries of this", "license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import", "list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return self._query_results_grouped_by @query_results_grouped_by.setter def query_results_grouped_by(self, query_results_grouped_by): \"\"\" Sets the query_results_grouped_by of", "by values. This is a list of ResultsGroupedBy summary objects, and the list", "value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultMetadataSummary(object): \"\"\" Summary", "__init__(self, **kwargs): \"\"\" Initializes a new QueryResultMetadataSummary object with values from keyword arguments.", "@init_model_state_from_kwargs class QueryResultMetadataSummary(object): \"\"\" Summary containing the metadata about the query result set.", "query_results_grouped_by: The value to assign to the query_results_grouped_by property of this QueryResultMetadataSummary. 
:type", "to assign to the query_results_ordered_by property of this QueryResultMetadataSummary. :type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param", "the select query. :return: The query_results_grouped_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return", "= None @property def query_result_row_type_summaries(self): \"\"\" Gets the query_result_row_type_summaries of this QueryResultMetadataSummary. A", "reserved. # This software is dual-licensed to you under the Universal Permissive License", "'timeSeriesIntervalInMins' } self._query_result_row_type_summaries = None self._source_name = None self._query_results_grouped_by = None self._query_results_ordered_by =", "QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary objects that describe the type and properties of", "of the individual row elements of the query rows being returned. The ith", "assign to the query_results_ordered_by property of this QueryResultMetadataSummary. :type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins:", "import init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultMetadataSummary(object): \"\"\" Summary containing the metadata about the query", "the query_results_ordered_by of this QueryResultMetadataSummary. Order by which the query results are organized.", "pair in the QueryResultRowData map. :param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary. :type:", "of the query rows which are group by values. This is a list", "OrderedBy summary object, if the sort was multidimensional. :param query_results_ordered_by: The query_results_ordered_by of", "or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. 
You may choose either license.", "The ith element in this list contains the QueryResultRowTypeSummary of the ith key", "and the list will contain as many elements as the attributes and aggregate", "Sets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which the query results are", "the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series function in minutes.", ":rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return self._query_results_grouped_by @query_results_grouped_by.setter def query_results_grouped_by(self, query_results_grouped_by): \"\"\" Sets the query_results_grouped_by", "QueryResultMetadataSummary. Interval for the time series function in minutes. :param time_series_interval_in_mins: The time_series_interval_in_mins", "'int' } self.attribute_map = { 'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name': 'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy',", "list of queryResultsOrderedBy summary objects, and the list will contain more than one", "this QueryResultMetadataSummary. :type source_name: str :param query_results_grouped_by: The value to assign to the", "QueryResultMetadataSummary. :type: int \"\"\" self._time_series_interval_in_mins = time_series_interval_in_mins def __repr__(self): return formatted_flat_dict(self) def __eq__(self,", "the source_name of this QueryResultMetadataSummary. Source of the query result set (traces, spans,", "@query_results_grouped_by.setter def query_results_grouped_by(self, query_results_grouped_by): \"\"\" Sets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or", "of the query rows being returned. The ith element in this list contains", "organized. This is a list of queryResultsOrderedBy summary objects, and the list will", "this QueryResultMetadataSummary. 
A collection of QueryResultRowTypeSummary objects that describe the type and properties", "time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\" Sets the time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time", "sort was multidimensional. :param query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\"", "\"\"\" return self._source_name @source_name.setter def source_name(self, source_name): \"\"\" Sets the source_name of this", "oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultMetadataSummary(object): \"\"\" Summary containing the metadata about the", "objects, and the list will contain as many elements as the attributes and", "The value to assign to the query_result_row_type_summaries property of this QueryResultMetadataSummary. :type query_result_row_type_summaries:", "time_series_interval_in_mins of this QueryResultMetadataSummary. Interval for the time series function in minutes. :return:", "@source_name.setter def source_name(self, source_name): \"\"\" Sets the source_name of this QueryResultMetadataSummary. Source of", "being returned. The ith element in this list contains the QueryResultRowTypeSummary of the", "'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins' } self._query_result_row_type_summaries = None self._source_name =", "group by clause in the select query. :param query_results_grouped_by: The query_results_grouped_by of this", "@query_results_ordered_by.setter def query_results_ordered_by(self, query_results_ordered_by): \"\"\" Sets the query_results_ordered_by of this QueryResultMetadataSummary. 
Order by", ":rtype: str \"\"\" return self._source_name @source_name.setter def source_name(self, source_name): \"\"\" Sets the source_name", "noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultMetadataSummary(object): \"\"\" Summary containing the", "the QueryResultRowData map. :return: The query_result_row_type_summaries of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return", "self._source_name = None self._query_results_grouped_by = None self._query_results_ordered_by = None self._time_series_interval_in_mins = None @property", "Source of the query result set (traces, spans, etc). :return: The source_name of", "@property def source_name(self): \"\"\" Gets the source_name of this QueryResultMetadataSummary. Source of the", "1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0.", "list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries = query_result_row_type_summaries @property def source_name(self): \"\"\" Gets the source_name of", "<gh_stars>100-1000 # coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates.", "series function in minutes. :return: The time_series_interval_in_mins of this QueryResultMetadataSummary. :rtype: int \"\"\"", "summary objects, and the list will contain as many elements as the attributes", "in this list contains the QueryResultRowTypeSummary of the ith key value pair in", "self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\" Sets the query_result_row_type_summaries of this QueryResultMetadataSummary. A", "set. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new QueryResultMetadataSummary object with values", "source_name of this QueryResultMetadataSummary. 
Source of the query result set (traces, spans, etc).", "def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False", "choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from", "etc). :return: The source_name of this QueryResultMetadataSummary. :rtype: str \"\"\" return self._source_name @source_name.setter", "Gets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which the query results are", "query_result_row_type_summaries of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries = query_result_row_type_summaries @property def source_name(self):", "property of this QueryResultMetadataSummary. :type source_name: str :param query_results_grouped_by: The value to assign", "source_name: The source_name of this QueryResultMetadataSummary. :type: str \"\"\" self._source_name = source_name @property", "# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All", "this QueryResultMetadataSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The value to assign to the", "source_name of this QueryResultMetadataSummary. :rtype: str \"\"\" return self._source_name @source_name.setter def source_name(self, source_name):", "query_results_ordered_by: The value to assign to the query_results_ordered_by property of this QueryResultMetadataSummary. :type", "attributes of the query rows which are group by values. This is a", "2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed", "the time_series_interval_in_mins property of this QueryResultMetadataSummary. 
:type time_series_interval_in_mins: int \"\"\" self.swagger_types = {", "\"\"\" return self._query_results_grouped_by @query_results_grouped_by.setter def query_results_grouped_by(self, query_results_grouped_by): \"\"\" Sets the query_results_grouped_by of this", "'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins' } self._query_result_row_type_summaries = None self._source_name = None self._query_results_grouped_by =", "at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel", "'queryResultRowTypeSummaries', 'source_name': 'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by': 'queryResultsOrderedBy', 'time_series_interval_in_mins': 'timeSeriesIntervalInMins' } self._query_result_row_type_summaries = None", "set (traces, spans, etc). :return: The source_name of this QueryResultMetadataSummary. :rtype: str \"\"\"", "The value to assign to the time_series_interval_in_mins property of this QueryResultMetadataSummary. :type time_series_interval_in_mins:", "this QueryResultMetadataSummary. :rtype: str \"\"\" return self._source_name @source_name.setter def source_name(self, source_name): \"\"\" Sets", "pair in the QueryResultRowData map. :return: The query_result_row_type_summaries of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary]", "assign to the time_series_interval_in_mins property of this QueryResultMetadataSummary. :type time_series_interval_in_mins: int \"\"\" self.swagger_types", "Gets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or attributes of the query rows", "multidimensional. :return: The query_results_ordered_by of this QueryResultMetadataSummary. 
:rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return self._query_results_ordered_by @query_results_ordered_by.setter", ":type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by = query_results_grouped_by @property def query_results_ordered_by(self): \"\"\" Gets the query_results_ordered_by", "clause in the select query. :return: The query_results_grouped_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary]", "is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not", "this QueryResultMetadataSummary. :type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The value to assign to the", "# noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultMetadataSummary(object): \"\"\" Summary containing", "The query_results_grouped_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return self._query_results_grouped_by @query_results_grouped_by.setter def query_results_grouped_by(self,", "of this QueryResultMetadataSummary. :rtype: int \"\"\" return self._time_series_interval_in_mins @time_series_interval_in_mins.setter def time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\"", "\"\"\" Initializes a new QueryResultMetadataSummary object with values from keyword arguments. The following", "query_results_grouped_by @property def query_results_ordered_by(self): \"\"\" Gets the query_results_ordered_by of this QueryResultMetadataSummary. Order by", "= query_results_ordered_by @property def time_series_interval_in_mins(self): \"\"\" Gets the time_series_interval_in_mins of this QueryResultMetadataSummary. 
Interval", "The following keyword arguments are supported (corresponding to the getters/setters of this class):", "to assign to the time_series_interval_in_mins property of this QueryResultMetadataSummary. :type time_series_interval_in_mins: int \"\"\"", "the query_results_grouped_by property of this QueryResultMetadataSummary. :type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The value", "the group by clause in the select query. :param query_results_grouped_by: The query_results_grouped_by of", "self._time_series_interval_in_mins = time_series_interval_in_mins def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is", ":type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The value to assign to the query_results_ordered_by property", "All rights reserved. # This software is dual-licensed to you under the Universal", "def query_results_ordered_by(self, query_results_ordered_by): \"\"\" Sets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which", "of this QueryResultMetadataSummary. Columns or attributes of the query rows which are group", "formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ ==", "property of this QueryResultMetadataSummary. :type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The value to assign", "in the group by clause in the select query. :return: The query_results_grouped_by of", ":type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by = query_results_ordered_by @property def time_series_interval_in_mins(self): \"\"\" Gets the time_series_interval_in_mins", "to assign to the query_result_row_type_summaries property of this QueryResultMetadataSummary. 
:type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param", "of QueryResultRowTypeSummary objects that describe the type and properties of the individual row", "**kwargs): \"\"\" Initializes a new QueryResultMetadataSummary object with values from keyword arguments. The", "either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators", "this QueryResultMetadataSummary. :type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary] :param query_results_ordered_by: The value to assign to the", "will contain more than one OrderedBy summary object, if the sort was multidimensional.", "self._query_results_ordered_by @query_results_ordered_by.setter def query_results_ordered_by(self, query_results_ordered_by): \"\"\" Sets the query_results_ordered_by of this QueryResultMetadataSummary. Order", "def query_results_grouped_by(self): \"\"\" Gets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or attributes of", "Order by which the query results are organized. This is a list of", "None self._time_series_interval_in_mins = None @property def query_result_row_type_summaries(self): \"\"\" Gets the query_result_row_type_summaries of this", "'list[QueryResultsOrderedBySummary]', 'time_series_interval_in_mins': 'int' } self.attribute_map = { 'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name': 'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy',", "Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as", "QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries = query_result_row_type_summaries @property def source_name(self): \"\"\" Gets the", "result set (traces, spans, etc). :param source_name: The source_name of this QueryResultMetadataSummary. 
:type:", "and aggregate functions in the group by clause in the select query. :param", "source_name: str :param query_results_grouped_by: The value to assign to the query_results_grouped_by property of", "None @property def query_result_row_type_summaries(self): \"\"\" Gets the query_result_row_type_summaries of this QueryResultMetadataSummary. A collection", "new QueryResultMetadataSummary object with values from keyword arguments. The following keyword arguments are", "source_name(self): \"\"\" Gets the source_name of this QueryResultMetadataSummary. Source of the query result", "class QueryResultMetadataSummary(object): \"\"\" Summary containing the metadata about the query result set. \"\"\"", ":param source_name: The value to assign to the source_name property of this QueryResultMetadataSummary.", "which are group by values. This is a list of ResultsGroupedBy summary objects,", "individual row elements of the query rows being returned. The ith element in", "the QueryResultRowData map. :param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\"", "the group by clause in the select query. :return: The query_results_grouped_by of this", ":rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\" Sets the query_result_row_type_summaries", "list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\" Sets the query_result_row_type_summaries of", "The query_result_row_type_summaries of this QueryResultMetadataSummary. 
:type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries = query_result_row_type_summaries @property def", "Interval for the time series function in minutes. :return: The time_series_interval_in_mins of this", "to the query_result_row_type_summaries property of this QueryResultMetadataSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The", "the time series function in minutes. :return: The time_series_interval_in_mins of this QueryResultMetadataSummary. :rtype:", "rights reserved. # This software is dual-licensed to you under the Universal Permissive", "element in this list contains the QueryResultRowTypeSummary of the ith key value pair", "this QueryResultMetadataSummary. :type time_series_interval_in_mins: int \"\"\" self.swagger_types = { 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name': 'str',", "time_series_interval_in_mins of this QueryResultMetadataSummary. :type: int \"\"\" self._time_series_interval_in_mins = time_series_interval_in_mins def __repr__(self): return", "the query rows being returned. The ith element in this list contains the", "select query. :param query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by", "one OrderedBy summary object, if the sort was multidimensional. :return: The query_results_ordered_by of", "the metadata about the query result set. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes", "2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import", "value to assign to the time_series_interval_in_mins property of this QueryResultMetadataSummary. 
:type time_series_interval_in_mins: int", "return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self ==", "that describe the type and properties of the individual row elements of the", "supported (corresponding to the getters/setters of this class): :param query_result_row_type_summaries: The value to", "minutes. :return: The time_series_interval_in_mins of this QueryResultMetadataSummary. :rtype: int \"\"\" return self._time_series_interval_in_mins @time_series_interval_in_mins.setter", "the list will contain as many elements as the attributes and aggregate functions", "\"\"\" Sets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which the query results", "shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL,", "None self._query_results_grouped_by = None self._query_results_ordered_by = None self._time_series_interval_in_mins = None @property def query_result_row_type_summaries(self):", "value pair in the QueryResultRowData map. :param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary.", "if the sort was multidimensional. :return: The query_results_ordered_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary]", "= query_results_grouped_by @property def query_results_ordered_by(self): \"\"\" Gets the query_results_ordered_by of this QueryResultMetadataSummary. Order", "summary object, if the sort was multidimensional. :return: The query_results_ordered_by of this QueryResultMetadataSummary.", "The source_name of this QueryResultMetadataSummary. :rtype: str \"\"\" return self._source_name @source_name.setter def source_name(self,", "\"\"\" Gets the query_results_ordered_by of this QueryResultMetadataSummary. 
Order by which the query results", "oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs", "dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at", "affiliates. All rights reserved. # This software is dual-licensed to you under the", "as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict,", "int \"\"\" self.swagger_types = { 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name': 'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]',", "with values from keyword arguments. The following keyword arguments are supported (corresponding to", "to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl", ":param source_name: The source_name of this QueryResultMetadataSummary. :type: str \"\"\" self._source_name = source_name", "# This software is dual-licensed to you under the Universal Permissive License (UPL)", "type and properties of the individual row elements of the query rows being", "query_results_ordered_by property of this QueryResultMetadataSummary. :type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary] :param time_series_interval_in_mins: The value to", "the query_result_row_type_summaries property of this QueryResultMetadataSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] :param source_name: The value", "= None self._source_name = None self._query_results_grouped_by = None self._query_results_ordered_by = None self._time_series_interval_in_mins =", "QueryResultRowData map. :return: The query_result_row_type_summaries of this QueryResultMetadataSummary. 
:rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return self._query_result_row_type_summaries", "@property def query_results_ordered_by(self): \"\"\" Gets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which", "query_results_ordered_by(self, query_results_ordered_by): \"\"\" Sets the query_results_ordered_by of this QueryResultMetadataSummary. Order by which the", "time_series_interval_in_mins: int \"\"\" self.swagger_types = { 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]', 'source_name': 'str', 'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]', 'query_results_ordered_by':", "and the list will contain more than one OrderedBy summary object, if the", "\"\"\" self._time_series_interval_in_mins = time_series_interval_in_mins def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other", "this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries): \"\"\" Sets", "The time_series_interval_in_mins of this QueryResultMetadataSummary. :rtype: int \"\"\" return self._time_series_interval_in_mins @time_series_interval_in_mins.setter def time_series_interval_in_mins(self,", "in the select query. :return: The query_results_grouped_by of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\"", "in the select query. :param query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsGroupedBySummary]", "Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to", "this QueryResultMetadataSummary. 
:rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return self._query_results_grouped_by @query_results_grouped_by.setter def query_results_grouped_by(self, query_results_grouped_by): \"\"\" Sets", "metadata about the query result set. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a", "query_results_ordered_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by = query_results_ordered_by @property def time_series_interval_in_mins(self):", "of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return self._query_results_grouped_by @query_results_grouped_by.setter def query_results_grouped_by(self, query_results_grouped_by): \"\"\"", "of ResultsGroupedBy summary objects, and the list will contain as many elements as", "rows being returned. The ith element in this list contains the QueryResultRowTypeSummary of", "in the QueryResultRowData map. :return: The query_result_row_type_summaries of this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\"", "summary object, if the sort was multidimensional. :param query_results_ordered_by: The query_results_ordered_by of this", "value to assign to the query_result_row_type_summaries property of this QueryResultMetadataSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary]", "source_name property of this QueryResultMetadataSummary. :type source_name: str :param query_results_grouped_by: The value to", "group by clause in the select query. :return: The query_results_grouped_by of this QueryResultMetadataSummary.", "this class): :param query_result_row_type_summaries: The value to assign to the query_result_row_type_summaries property of", "time_series_interval_in_mins of this QueryResultMetadataSummary. 
:rtype: int \"\"\" return self._time_series_interval_in_mins @time_series_interval_in_mins.setter def time_series_interval_in_mins(self, time_series_interval_in_mins):", "key value pair in the QueryResultRowData map. :return: The query_result_row_type_summaries of this QueryResultMetadataSummary.", "of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary] \"\"\" self._query_result_row_type_summaries = query_result_row_type_summaries @property def source_name(self): \"\"\"", "QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return self._query_results_grouped_by @query_results_grouped_by.setter def query_results_grouped_by(self, query_results_grouped_by): \"\"\" Sets the", "QueryResultMetadataSummary. :type: str \"\"\" self._source_name = source_name @property def query_results_grouped_by(self): \"\"\" Gets the", "attributes and aggregate functions in the group by clause in the select query.", "of the query result set (traces, spans, etc). :param source_name: The source_name of", "QueryResultMetadataSummary. Columns or attributes of the query rows which are group by values.", "group by values. This is a list of ResultsGroupedBy summary objects, and the", "Initializes a new QueryResultMetadataSummary object with values from keyword arguments. The following keyword", "following keyword arguments are supported (corresponding to the getters/setters of this class): :param", "the query_results_grouped_by of this QueryResultMetadataSummary. Columns or attributes of the query rows which", "etc). :param source_name: The source_name of this QueryResultMetadataSummary. :type: str \"\"\" self._source_name =", ":param query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by = query_results_ordered_by", "QueryResultMetadataSummary. 
:type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by = query_results_grouped_by @property def query_results_ordered_by(self): \"\"\" Gets the", "result set. \"\"\" def __init__(self, **kwargs): \"\"\" Initializes a new QueryResultMetadataSummary object with", "query result set (traces, spans, etc). :param source_name: The source_name of this QueryResultMetadataSummary.", "if the sort was multidimensional. :param query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary. :type:", "will contain as many elements as the attributes and aggregate functions in the", "of this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" self._query_results_ordered_by = query_results_ordered_by @property def time_series_interval_in_mins(self): \"\"\"", "of this QueryResultMetadataSummary. A collection of QueryResultRowTypeSummary objects that describe the type and", "query_results_grouped_by(self): \"\"\" Gets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or attributes of the", "'time_series_interval_in_mins': 'int' } self.attribute_map = { 'query_result_row_type_summaries': 'queryResultRowTypeSummaries', 'source_name': 'sourceName', 'query_results_grouped_by': 'queryResultsGroupedBy', 'query_results_ordered_by':", "this list contains the QueryResultRowTypeSummary of the ith key value pair in the", "Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0", "False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other", "this QueryResultMetadataSummary. :type: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" self._query_results_grouped_by = query_results_grouped_by @property def query_results_ordered_by(self): \"\"\" Gets", "query. :return: The query_results_grouped_by of this QueryResultMetadataSummary. 
:rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary] \"\"\" return self._query_results_grouped_by @query_results_grouped_by.setter", "this QueryResultMetadataSummary. :rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary] \"\"\" return self._query_results_ordered_by @query_results_ordered_by.setter def query_results_ordered_by(self, query_results_ordered_by): \"\"\" Sets", "values. This is a list of ResultsGroupedBy summary objects, and the list will", "of the query result set (traces, spans, etc). :return: The source_name of this", "def query_results_grouped_by(self, query_results_grouped_by): \"\"\" Sets the query_results_grouped_by of this QueryResultMetadataSummary. Columns or attributes", ":rtype: int \"\"\" return self._time_series_interval_in_mins @time_series_interval_in_mins.setter def time_series_interval_in_mins(self, time_series_interval_in_mins): \"\"\" Sets the time_series_interval_in_mins", "\"\"\" self._source_name = source_name @property def query_results_grouped_by(self): \"\"\" Gets the query_results_grouped_by of this", "the source_name property of this QueryResultMetadataSummary. :type source_name: str :param query_results_grouped_by: The value", "keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of" ]
[ "json_parser.load_file(\"path/to/json/file\") assert json_parser.data == {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} def test_load_file_wrong_type(): with", "= {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} filepath = os.path.join(tmpdir, \"tmp_json.json\") with open(filepath,", "os from unittest.mock import patch, mock_open import pytest from signal_interpreter_server.json_parser import JsonParser from", "@pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir): tmp_db = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} filepath =", "assert isinstance(jason_parser.data, dict) assert jason_parser.data == tmp_db def test_load_file_simple(): with patch(\"builtins.open\", mock_open(read_data='{\"services\": [{\"title\":", "\"11\"}]} if identifier != '99': assert jason_parser.get_signal_title(identifier) == expected_result else: with pytest.raises(SignalError): jason_parser.get_signal_title(identifier)", "\"11\"}]} def test_load_file_wrong_type(): with patch(\"builtins.open\", mock_open(read_data=\"This is wrong data!\")): with pytest.raises(ValueError): json_parser =", "patch(\"builtins.open\", mock_open(read_data='{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}')): json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\") assert json_parser.data", "json.dump(tmp_db, jfile) jason_parser = JsonParser() jason_parser.load_file(filepath) assert isinstance(jason_parser.data, dict) assert jason_parser.data == tmp_db", "jason_parser = JsonParser() jason_parser.data = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} if identifier", "jason_parser.get_signal_title(identifier) == expected_result else: with pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir): tmp_db = {\"services\":", "JsonParser() json_parser.load_file(\"path/to/json/file\") assert json_parser.data == {\"services\": 
[{\"title\": \"ECU Reset\", \"id\": \"11\"}]} def test_load_file_wrong_type():", "pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir): tmp_db = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}", "\"id\": \"11\"}]} if identifier != '99': assert jason_parser.get_signal_title(identifier) == expected_result else: with pytest.raises(SignalError):", "== {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} def test_load_file_wrong_type(): with patch(\"builtins.open\", mock_open(read_data=\"This is", "jason_parser.data == tmp_db def test_load_file_simple(): with patch(\"builtins.open\", mock_open(read_data='{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}')):", "import os from unittest.mock import patch, mock_open import pytest from signal_interpreter_server.json_parser import JsonParser", "= os.path.join(tmpdir, \"tmp_json.json\") with open(filepath, 'w') as jfile: json.dump(tmp_db, jfile) jason_parser = JsonParser()", "isinstance(jason_parser.data, dict) assert jason_parser.data == tmp_db def test_load_file_simple(): with patch(\"builtins.open\", mock_open(read_data='{\"services\": [{\"title\": \"ECU", "{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} def test_load_file_wrong_type(): with patch(\"builtins.open\", mock_open(read_data=\"This is wrong", "jfile: json.dump(tmp_db, jfile) jason_parser = JsonParser() jason_parser.load_file(filepath) assert isinstance(jason_parser.data, dict) assert jason_parser.data ==", "jason_parser = JsonParser() jason_parser.load_file(filepath) assert isinstance(jason_parser.data, dict) assert jason_parser.data == tmp_db def test_load_file_simple():", "SignalError @pytest.mark.parametrize(\"identifier, expected_result\", [ (\"11\", \"ECU Reset\"), (\"99\", \"Not existing\"), ]) def test_get_signal_title(identifier,", "JsonParser() jason_parser.data = {\"services\": [{\"title\": \"ECU Reset\", \"id\": 
\"11\"}]} if identifier != '99':", "jfile) jason_parser = JsonParser() jason_parser.load_file(filepath) assert isinstance(jason_parser.data, dict) assert jason_parser.data == tmp_db def", "filepath = os.path.join(tmpdir, \"tmp_json.json\") with open(filepath, 'w') as jfile: json.dump(tmp_db, jfile) jason_parser =", "assert jason_parser.data == tmp_db def test_load_file_simple(): with patch(\"builtins.open\", mock_open(read_data='{\"services\": [{\"title\": \"ECU Reset\", \"id\":", "Reset\"), (\"99\", \"Not existing\"), ]) def test_get_signal_title(identifier, expected_result): jason_parser = JsonParser() jason_parser.data =", "Reset\", \"id\": \"11\"}]}')): json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\") assert json_parser.data == {\"services\": [{\"title\": \"ECU", "\"tmp_json.json\") with open(filepath, 'w') as jfile: json.dump(tmp_db, jfile) jason_parser = JsonParser() jason_parser.load_file(filepath) assert", "(\"11\", \"ECU Reset\"), (\"99\", \"Not existing\"), ]) def test_get_signal_title(identifier, expected_result): jason_parser = JsonParser()", "\"ECU Reset\"), (\"99\", \"Not existing\"), ]) def test_get_signal_title(identifier, expected_result): jason_parser = JsonParser() jason_parser.data", "test_load_file_with_fixure(tmpdir): tmp_db = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} filepath = os.path.join(tmpdir, \"tmp_json.json\")", "test_get_signal_title(identifier, expected_result): jason_parser = JsonParser() jason_parser.data = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}", "with pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir): tmp_db = {\"services\": [{\"title\": \"ECU Reset\", \"id\":", "def test_load_file_simple(): with patch(\"builtins.open\", mock_open(read_data='{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}')): json_parser = JsonParser()", "assert json_parser.data == 
{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} def test_load_file_wrong_type(): with patch(\"builtins.open\",", "jason_parser.data = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} if identifier != '99': assert", "'99': assert jason_parser.get_signal_title(identifier) == expected_result else: with pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir): tmp_db", "expected_result\", [ (\"11\", \"ECU Reset\"), (\"99\", \"Not existing\"), ]) def test_get_signal_title(identifier, expected_result): jason_parser", "json import os from unittest.mock import patch, mock_open import pytest from signal_interpreter_server.json_parser import", "[{\"title\": \"ECU Reset\", \"id\": \"11\"}]} if identifier != '99': assert jason_parser.get_signal_title(identifier) == expected_result", "[{\"title\": \"ECU Reset\", \"id\": \"11\"}]} def test_load_file_wrong_type(): with patch(\"builtins.open\", mock_open(read_data=\"This is wrong data!\")):", "as jfile: json.dump(tmp_db, jfile) jason_parser = JsonParser() jason_parser.load_file(filepath) assert isinstance(jason_parser.data, dict) assert jason_parser.data", "(\"99\", \"Not existing\"), ]) def test_get_signal_title(identifier, expected_result): jason_parser = JsonParser() jason_parser.data = {\"services\":", "\"Not existing\"), ]) def test_get_signal_title(identifier, expected_result): jason_parser = JsonParser() jason_parser.data = {\"services\": [{\"title\":", "if identifier != '99': assert jason_parser.get_signal_title(identifier) == expected_result else: with pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\")", "def test_load_file_with_fixure(tmpdir): tmp_db = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} filepath = os.path.join(tmpdir,", "\"ECU Reset\", \"id\": \"11\"}]}')): json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\") 
assert json_parser.data == {\"services\": [{\"title\":", "\"id\": \"11\"}]}')): json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\") assert json_parser.data == {\"services\": [{\"title\": \"ECU Reset\",", "expected_result): jason_parser = JsonParser() jason_parser.data = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} if", "\"id\": \"11\"}]} filepath = os.path.join(tmpdir, \"tmp_json.json\") with open(filepath, 'w') as jfile: json.dump(tmp_db, jfile)", "\"11\"}]}')): json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\") assert json_parser.data == {\"services\": [{\"title\": \"ECU Reset\", \"id\":", "patch, mock_open import pytest from signal_interpreter_server.json_parser import JsonParser from signal_interpreter_server.exceptions import SignalError @pytest.mark.parametrize(\"identifier,", "\"ECU Reset\", \"id\": \"11\"}]} if identifier != '99': assert jason_parser.get_signal_title(identifier) == expected_result else:", "!= '99': assert jason_parser.get_signal_title(identifier) == expected_result else: with pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir):", "[ (\"11\", \"ECU Reset\"), (\"99\", \"Not existing\"), ]) def test_get_signal_title(identifier, expected_result): jason_parser =", "JsonParser() jason_parser.load_file(filepath) assert isinstance(jason_parser.data, dict) assert jason_parser.data == tmp_db def test_load_file_simple(): with patch(\"builtins.open\",", "existing\"), ]) def test_get_signal_title(identifier, expected_result): jason_parser = JsonParser() jason_parser.data = {\"services\": [{\"title\": \"ECU", "@pytest.mark.parametrize(\"identifier, expected_result\", [ (\"11\", \"ECU Reset\"), (\"99\", \"Not existing\"), ]) def test_get_signal_title(identifier, expected_result):", "Reset\", \"id\": \"11\"}]} filepath = os.path.join(tmpdir, \"tmp_json.json\") with open(filepath, 'w') as jfile: 
json.dump(tmp_db,", "import JsonParser from signal_interpreter_server.exceptions import SignalError @pytest.mark.parametrize(\"identifier, expected_result\", [ (\"11\", \"ECU Reset\"), (\"99\",", "import patch, mock_open import pytest from signal_interpreter_server.json_parser import JsonParser from signal_interpreter_server.exceptions import SignalError", "mock_open import pytest from signal_interpreter_server.json_parser import JsonParser from signal_interpreter_server.exceptions import SignalError @pytest.mark.parametrize(\"identifier, expected_result\",", "from signal_interpreter_server.json_parser import JsonParser from signal_interpreter_server.exceptions import SignalError @pytest.mark.parametrize(\"identifier, expected_result\", [ (\"11\", \"ECU", "json_parser.data == {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} def test_load_file_wrong_type(): with patch(\"builtins.open\", mock_open(read_data=\"This", "[{\"title\": \"ECU Reset\", \"id\": \"11\"}]}')): json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\") assert json_parser.data == {\"services\":", "signal_interpreter_server.exceptions import SignalError @pytest.mark.parametrize(\"identifier, expected_result\", [ (\"11\", \"ECU Reset\"), (\"99\", \"Not existing\"), ])", "= JsonParser() jason_parser.data = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} if identifier !=", "Reset\", \"id\": \"11\"}]} if identifier != '99': assert jason_parser.get_signal_title(identifier) == expected_result else: with", "expected_result else: with pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir): tmp_db = {\"services\": [{\"title\": \"ECU", "JsonParser from signal_interpreter_server.exceptions import SignalError @pytest.mark.parametrize(\"identifier, expected_result\", [ (\"11\", \"ECU Reset\"), (\"99\", \"Not", "== tmp_db def test_load_file_simple(): with patch(\"builtins.open\", 
mock_open(read_data='{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}')): json_parser", "[{\"title\": \"ECU Reset\", \"id\": \"11\"}]} filepath = os.path.join(tmpdir, \"tmp_json.json\") with open(filepath, 'w') as", "signal_interpreter_server.json_parser import JsonParser from signal_interpreter_server.exceptions import SignalError @pytest.mark.parametrize(\"identifier, expected_result\", [ (\"11\", \"ECU Reset\"),", "'w') as jfile: json.dump(tmp_db, jfile) jason_parser = JsonParser() jason_parser.load_file(filepath) assert isinstance(jason_parser.data, dict) assert", "from signal_interpreter_server.exceptions import SignalError @pytest.mark.parametrize(\"identifier, expected_result\", [ (\"11\", \"ECU Reset\"), (\"99\", \"Not existing\"),", "{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} if identifier != '99': assert jason_parser.get_signal_title(identifier) ==", "def test_get_signal_title(identifier, expected_result): jason_parser = JsonParser() jason_parser.data = {\"services\": [{\"title\": \"ECU Reset\", \"id\":", "jason_parser.load_file(filepath) assert isinstance(jason_parser.data, dict) assert jason_parser.data == tmp_db def test_load_file_simple(): with patch(\"builtins.open\", mock_open(read_data='{\"services\":", "jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir): tmp_db = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} filepath", "import json import os from unittest.mock import patch, mock_open import pytest from signal_interpreter_server.json_parser", "def test_load_file_wrong_type(): with patch(\"builtins.open\", mock_open(read_data=\"This is wrong data!\")): with pytest.raises(ValueError): json_parser = JsonParser()", "= JsonParser() jason_parser.load_file(filepath) assert isinstance(jason_parser.data, dict) assert jason_parser.data == tmp_db def test_load_file_simple(): with", "unittest.mock import patch, mock_open import pytest from 
signal_interpreter_server.json_parser import JsonParser from signal_interpreter_server.exceptions import", "dict) assert jason_parser.data == tmp_db def test_load_file_simple(): with patch(\"builtins.open\", mock_open(read_data='{\"services\": [{\"title\": \"ECU Reset\",", "\"ECU Reset\", \"id\": \"11\"}]} def test_load_file_wrong_type(): with patch(\"builtins.open\", mock_open(read_data=\"This is wrong data!\")): with", "]) def test_get_signal_title(identifier, expected_result): jason_parser = JsonParser() jason_parser.data = {\"services\": [{\"title\": \"ECU Reset\",", "= {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} if identifier != '99': assert jason_parser.get_signal_title(identifier)", "open(filepath, 'w') as jfile: json.dump(tmp_db, jfile) jason_parser = JsonParser() jason_parser.load_file(filepath) assert isinstance(jason_parser.data, dict)", "test_load_file_wrong_type(): with patch(\"builtins.open\", mock_open(read_data=\"This is wrong data!\")): with pytest.raises(ValueError): json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\")", "= JsonParser() json_parser.load_file(\"path/to/json/file\") assert json_parser.data == {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} def", "assert jason_parser.get_signal_title(identifier) == expected_result else: with pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir): tmp_db =", "tmp_db def test_load_file_simple(): with patch(\"builtins.open\", mock_open(read_data='{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}')): json_parser =", "\"11\"}]} filepath = os.path.join(tmpdir, \"tmp_json.json\") with open(filepath, 'w') as jfile: json.dump(tmp_db, jfile) jason_parser", "mock_open(read_data='{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}')): json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\") assert json_parser.data ==", "with 
patch(\"builtins.open\", mock_open(read_data='{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}')): json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\") assert", "== expected_result else: with pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir): tmp_db = {\"services\": [{\"title\":", "{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} filepath = os.path.join(tmpdir, \"tmp_json.json\") with open(filepath, 'w')", "identifier != '99': assert jason_parser.get_signal_title(identifier) == expected_result else: with pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def", "\"ECU Reset\", \"id\": \"11\"}]} filepath = os.path.join(tmpdir, \"tmp_json.json\") with open(filepath, 'w') as jfile:", "import SignalError @pytest.mark.parametrize(\"identifier, expected_result\", [ (\"11\", \"ECU Reset\"), (\"99\", \"Not existing\"), ]) def", "json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\") assert json_parser.data == {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}", "test_load_file_simple(): with patch(\"builtins.open\", mock_open(read_data='{\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]}')): json_parser = JsonParser() json_parser.load_file(\"path/to/json/file\")", "from unittest.mock import patch, mock_open import pytest from signal_interpreter_server.json_parser import JsonParser from signal_interpreter_server.exceptions", "\"id\": \"11\"}]} def test_load_file_wrong_type(): with patch(\"builtins.open\", mock_open(read_data=\"This is wrong data!\")): with pytest.raises(ValueError): json_parser", "else: with pytest.raises(SignalError): jason_parser.get_signal_title(identifier) @pytest.fixture(scope=\"session\") def test_load_file_with_fixure(tmpdir): tmp_db = {\"services\": [{\"title\": \"ECU Reset\",", "import pytest from 
signal_interpreter_server.json_parser import JsonParser from signal_interpreter_server.exceptions import SignalError @pytest.mark.parametrize(\"identifier, expected_result\", [", "with open(filepath, 'w') as jfile: json.dump(tmp_db, jfile) jason_parser = JsonParser() jason_parser.load_file(filepath) assert isinstance(jason_parser.data,", "os.path.join(tmpdir, \"tmp_json.json\") with open(filepath, 'w') as jfile: json.dump(tmp_db, jfile) jason_parser = JsonParser() jason_parser.load_file(filepath)", "Reset\", \"id\": \"11\"}]} def test_load_file_wrong_type(): with patch(\"builtins.open\", mock_open(read_data=\"This is wrong data!\")): with pytest.raises(ValueError):", "pytest from signal_interpreter_server.json_parser import JsonParser from signal_interpreter_server.exceptions import SignalError @pytest.mark.parametrize(\"identifier, expected_result\", [ (\"11\",", "tmp_db = {\"services\": [{\"title\": \"ECU Reset\", \"id\": \"11\"}]} filepath = os.path.join(tmpdir, \"tmp_json.json\") with" ]
[ "Test tensorflow-gpu\") import tensorflow as tf is_gpu_available = tf.test.is_gpu_available() print(\">> __version__: \", tf.__version__)", "print(\"> Test tensorflow-gpu\") import tensorflow as tf is_gpu_available = tf.test.is_gpu_available() print(\">> __version__: \",", "<reponame>tranlethaison/learnRL if __name__ == \"__main__\": print(\"> Test tensorflow-gpu\") import tensorflow as tf is_gpu_available", "import tensorflow as tf is_gpu_available = tf.test.is_gpu_available() print(\">> __version__: \", tf.__version__) print(\">> is_gpu_available:\",", "tensorflow as tf is_gpu_available = tf.test.is_gpu_available() print(\">> __version__: \", tf.__version__) print(\">> is_gpu_available:\", is_gpu_available)", "__name__ == \"__main__\": print(\"> Test tensorflow-gpu\") import tensorflow as tf is_gpu_available = tf.test.is_gpu_available()", "\"__main__\": print(\"> Test tensorflow-gpu\") import tensorflow as tf is_gpu_available = tf.test.is_gpu_available() print(\">> __version__:", "== \"__main__\": print(\"> Test tensorflow-gpu\") import tensorflow as tf is_gpu_available = tf.test.is_gpu_available() print(\">>", "tensorflow-gpu\") import tensorflow as tf is_gpu_available = tf.test.is_gpu_available() print(\">> __version__: \", tf.__version__) print(\">>", "if __name__ == \"__main__\": print(\"> Test tensorflow-gpu\") import tensorflow as tf is_gpu_available =" ]
[ "web imports from flask import Flask from blinker import Namespace # or from", "= app.test_client() # request new process data = {\"args\": [\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data)", "your choice @my_signal.connect def my_callback_fn(extra_callback_context, future: Future): \"\"\" Will be invoked on every", "flask_executor import Executor from flask_executor.futures import Future from flask_shell2http import Shell2HTTP # Flask", "process completion \"\"\" print(\"Process completed ?:\", future.done()) print(\"Result: \", future.result()) shell2http.register_command( endpoint=ENDPOINT, command_name=CMD,", "flask_executor.futures import Future from flask_shell2http import Shell2HTTP # Flask application instance app =", "..or any other name of your choice @my_signal.connect def my_callback_fn(extra_callback_context, future: Future): \"\"\"", "executor, base_url_prefix=\"/cmd/\") ENDPOINT = \"echo\" CMD = \"echo\" # Signal Handling signal_handler =", "future.result()) shell2http.register_command( endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send ) # Test Runner if __name__ == \"__main__\":", "or from flask.signals import Namespace from flask_executor import Executor from flask_executor.futures import Future", "True c = app.test_client() # request new process data = {\"args\": [\"Hello\", \"Friend!\"]}", "[\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data) # request new process data = {\"args\": [\"Bye\", \"Friend!\"]}", "completed ?:\", future.done()) print(\"Result: \", future.result()) shell2http.register_command( endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send ) # Test", "= Flask(__name__) # application factory executor = Executor(app) shell2http = Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\")", ") # Test Runner if __name__ == \"__main__\": app.testing = True c =", "# or from flask.signals import Namespace from flask_executor import Executor from flask_executor.futures import", "future: 
Future): \"\"\" Will be invoked on every process completion \"\"\" print(\"Process completed", "Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\") ENDPOINT = \"echo\" CMD = \"echo\" # Signal Handling signal_handler", "of your choice @my_signal.connect def my_callback_fn(extra_callback_context, future: Future): \"\"\" Will be invoked on", "Will be invoked on every process completion \"\"\" print(\"Process completed ?:\", future.done()) print(\"Result:", "Executor from flask_executor.futures import Future from flask_shell2http import Shell2HTTP # Flask application instance", "flask_shell2http import Shell2HTTP # Flask application instance app = Flask(__name__) # application factory", "ENDPOINT = \"echo\" CMD = \"echo\" # Signal Handling signal_handler = Namespace() my_signal", "Flask application instance app = Flask(__name__) # application factory executor = Executor(app) shell2http", "my_signal = signal_handler.signal(f\"on_{CMD}_complete\") # ..or any other name of your choice @my_signal.connect def", "= Namespace() my_signal = signal_handler.signal(f\"on_{CMD}_complete\") # ..or any other name of your choice", "?:\", future.done()) print(\"Result: \", future.result()) shell2http.register_command( endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send ) # Test Runner", "shell2http.register_command( endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send ) # Test Runner if __name__ == \"__main__\": app.testing", "# application factory executor = Executor(app) shell2http = Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\") ENDPOINT =", "from flask_executor.futures import Future from flask_shell2http import Shell2HTTP # Flask application instance app", "command_name=CMD, callback_fn=my_signal.send ) # Test Runner if __name__ == \"__main__\": app.testing = True", "every process completion \"\"\" print(\"Process completed ?:\", future.done()) print(\"Result: \", future.result()) shell2http.register_command( endpoint=ENDPOINT,", "application factory 
executor = Executor(app) shell2http = Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\") ENDPOINT = \"echo\"", "print(\"Result: \", future.result()) shell2http.register_command( endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send ) # Test Runner if __name__", "blinker import Namespace # or from flask.signals import Namespace from flask_executor import Executor", "flask import Flask from blinker import Namespace # or from flask.signals import Namespace", "\"\"\" print(\"Process completed ?:\", future.done()) print(\"Result: \", future.result()) shell2http.register_command( endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send )", "app.testing = True c = app.test_client() # request new process data = {\"args\":", "invoked on every process completion \"\"\" print(\"Process completed ?:\", future.done()) print(\"Result: \", future.result())", "print(\"Process completed ?:\", future.done()) print(\"Result: \", future.result()) shell2http.register_command( endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send ) #", "Test Runner if __name__ == \"__main__\": app.testing = True c = app.test_client() #", "== \"__main__\": app.testing = True c = app.test_client() # request new process data", "request new process data = {\"args\": [\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data) # request new", "\"\"\" Will be invoked on every process completion \"\"\" print(\"Process completed ?:\", future.done())", "future.done()) print(\"Result: \", future.result()) shell2http.register_command( endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send ) # Test Runner if", "# Signal Handling signal_handler = Namespace() my_signal = signal_handler.signal(f\"on_{CMD}_complete\") # ..or any other", "= \"echo\" # Signal Handling signal_handler = Namespace() my_signal = signal_handler.signal(f\"on_{CMD}_complete\") # ..or", "import Shell2HTTP # Flask application instance app = Flask(__name__) # application factory executor", "signal_handler 
= Namespace() my_signal = signal_handler.signal(f\"on_{CMD}_complete\") # ..or any other name of your", "application instance app = Flask(__name__) # application factory executor = Executor(app) shell2http =", "import Namespace # or from flask.signals import Namespace from flask_executor import Executor from", "flask.signals import Namespace from flask_executor import Executor from flask_executor.futures import Future from flask_shell2http", "{\"args\": [\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data) # request new process data = {\"args\": [\"Bye\",", "__name__ == \"__main__\": app.testing = True c = app.test_client() # request new process", "Namespace() my_signal = signal_handler.signal(f\"on_{CMD}_complete\") # ..or any other name of your choice @my_signal.connect", "instance app = Flask(__name__) # application factory executor = Executor(app) shell2http = Shell2HTTP(app,", "executor = Executor(app) shell2http = Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\") ENDPOINT = \"echo\" CMD =", "base_url_prefix=\"/cmd/\") ENDPOINT = \"echo\" CMD = \"echo\" # Signal Handling signal_handler = Namespace()", "\", future.result()) shell2http.register_command( endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send ) # Test Runner if __name__ ==", "Namespace from flask_executor import Executor from flask_executor.futures import Future from flask_shell2http import Shell2HTTP", "def my_callback_fn(extra_callback_context, future: Future): \"\"\" Will be invoked on every process completion \"\"\"", "factory executor = Executor(app) shell2http = Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\") ENDPOINT = \"echo\" CMD", "Future): \"\"\" Will be invoked on every process completion \"\"\" print(\"Process completed ?:\",", "endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send ) # Test Runner if __name__ == \"__main__\": app.testing =", "from flask_shell2http import Shell2HTTP # Flask application instance app = Flask(__name__) # application", 
"app = Flask(__name__) # application factory executor = Executor(app) shell2http = Shell2HTTP(app, executor,", "Namespace # or from flask.signals import Namespace from flask_executor import Executor from flask_executor.futures", "my_callback_fn(extra_callback_context, future: Future): \"\"\" Will be invoked on every process completion \"\"\" print(\"Process", "any other name of your choice @my_signal.connect def my_callback_fn(extra_callback_context, future: Future): \"\"\" Will", "Runner if __name__ == \"__main__\": app.testing = True c = app.test_client() # request", "# Test Runner if __name__ == \"__main__\": app.testing = True c = app.test_client()", "= Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\") ENDPOINT = \"echo\" CMD = \"echo\" # Signal Handling", "choice @my_signal.connect def my_callback_fn(extra_callback_context, future: Future): \"\"\" Will be invoked on every process", "if __name__ == \"__main__\": app.testing = True c = app.test_client() # request new", "# ..or any other name of your choice @my_signal.connect def my_callback_fn(extra_callback_context, future: Future):", "\"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data) # request new process data = {\"args\": [\"Bye\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\",", "import Namespace from flask_executor import Executor from flask_executor.futures import Future from flask_shell2http import", "@my_signal.connect def my_callback_fn(extra_callback_context, future: Future): \"\"\" Will be invoked on every process completion", "import Executor from flask_executor.futures import Future from flask_shell2http import Shell2HTTP # Flask application", "= \"echo\" CMD = \"echo\" # Signal Handling signal_handler = Namespace() my_signal =", "# Flask application instance app = Flask(__name__) # application factory executor = Executor(app)", "on every process completion \"\"\" print(\"Process completed ?:\", future.done()) print(\"Result: \", future.result()) shell2http.register_command(", "process data = 
{\"args\": [\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data) # request new process data", "other name of your choice @my_signal.connect def my_callback_fn(extra_callback_context, future: Future): \"\"\" Will be", "CMD = \"echo\" # Signal Handling signal_handler = Namespace() my_signal = signal_handler.signal(f\"on_{CMD}_complete\") #", "Future from flask_shell2http import Shell2HTTP # Flask application instance app = Flask(__name__) #", "Shell2HTTP # Flask application instance app = Flask(__name__) # application factory executor =", "Flask(__name__) # application factory executor = Executor(app) shell2http = Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\") ENDPOINT", "\"echo\" CMD = \"echo\" # Signal Handling signal_handler = Namespace() my_signal = signal_handler.signal(f\"on_{CMD}_complete\")", "imports from flask import Flask from blinker import Namespace # or from flask.signals", "= signal_handler.signal(f\"on_{CMD}_complete\") # ..or any other name of your choice @my_signal.connect def my_callback_fn(extra_callback_context,", "from blinker import Namespace # or from flask.signals import Namespace from flask_executor import", "from flask.signals import Namespace from flask_executor import Executor from flask_executor.futures import Future from", "= Executor(app) shell2http = Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\") ENDPOINT = \"echo\" CMD = \"echo\"", "Handling signal_handler = Namespace() my_signal = signal_handler.signal(f\"on_{CMD}_complete\") # ..or any other name of", "completion \"\"\" print(\"Process completed ?:\", future.done()) print(\"Result: \", future.result()) shell2http.register_command( endpoint=ENDPOINT, command_name=CMD, callback_fn=my_signal.send", "\"echo\" # Signal Handling signal_handler = Namespace() my_signal = signal_handler.signal(f\"on_{CMD}_complete\") # ..or any", "name of your choice @my_signal.connect def my_callback_fn(extra_callback_context, future: Future): \"\"\" Will be invoked", 
"c.post(f\"cmd/{ENDPOINT}\", json=data) # request new process data = {\"args\": [\"Bye\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data)", "= True c = app.test_client() # request new process data = {\"args\": [\"Hello\",", "Flask from blinker import Namespace # or from flask.signals import Namespace from flask_executor", "be invoked on every process completion \"\"\" print(\"Process completed ?:\", future.done()) print(\"Result: \",", "c = app.test_client() # request new process data = {\"args\": [\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\",", "Executor(app) shell2http = Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\") ENDPOINT = \"echo\" CMD = \"echo\" #", "shell2http = Shell2HTTP(app, executor, base_url_prefix=\"/cmd/\") ENDPOINT = \"echo\" CMD = \"echo\" # Signal", "callback_fn=my_signal.send ) # Test Runner if __name__ == \"__main__\": app.testing = True c", "= {\"args\": [\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data) # request new process data = {\"args\":", "from flask_executor import Executor from flask_executor.futures import Future from flask_shell2http import Shell2HTTP #", "import Future from flask_shell2http import Shell2HTTP # Flask application instance app = Flask(__name__)", "data = {\"args\": [\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data) # request new process data =", "# web imports from flask import Flask from blinker import Namespace # or", "Signal Handling signal_handler = Namespace() my_signal = signal_handler.signal(f\"on_{CMD}_complete\") # ..or any other name", "signal_handler.signal(f\"on_{CMD}_complete\") # ..or any other name of your choice @my_signal.connect def my_callback_fn(extra_callback_context, future:", "app.test_client() # request new process data = {\"args\": [\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data) #", "# request new process data = {\"args\": [\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data) # request", "new process data = {\"args\": 
[\"Hello\", \"Friend!\"]} c.post(f\"cmd/{ENDPOINT}\", json=data) # request new process", "from flask import Flask from blinker import Namespace # or from flask.signals import", "\"__main__\": app.testing = True c = app.test_client() # request new process data =", "import Flask from blinker import Namespace # or from flask.signals import Namespace from" ]
[ "start_obj_id, end_obj_id): \"\"\" method finds unit_of_work record and returns it to the caller\"\"\"", "as e: exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id, instance.end_id, e) raise exc @thread_safe def", "recover from DuplicateKeyError \"\"\" if isinstance(e, DuplicateKeyError): try: return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id)", "ID=%s was not found' % str(key) self.logger.warn(msg) raise LookupError(msg) return UnitOfWork.from_json(document) @thread_safe def", "{unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document =", "from synergy.system import time_helper from synergy.system.time_qualifier import * from synergy.system.decorator import thread_safe from", "UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = instance.document if instance.db_id: document['_id'] = ObjectId(instance.db_id) instance.db_id", "{'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED} if since is None: cursor = collection.find(query).sort('_id',", "[unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED} if since is None: cursor = collection.find(query).sort('_id', ASCENDING)", "\"\"\" Thread-safe Data Access Object from units_of_work table/collection \"\"\" def __init__(self, logger): super(UnitOfWorkDao,", "= {'$gte': yearly_timeperiod} cursor = collection.find(query).sort('_id', ASCENDING) for document in cursor: uow =", "pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError from synergy.system import time_helper from synergy.system.time_qualifier import *", "method queries Unit Of Work whose <start_timeperiod> is younger than 
<since> and who", "is None: cursor = collection.find(query).sort('_id', ASCENDING) candidates = [UnitOfWork.from_json(document) for document in cursor]", "instance): \"\"\" method finds unit_of_work record and change its status\"\"\" assert isinstance(instance, UnitOfWork)", "isinstance(e, DuplicateKeyError): try: return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id) except LookupError as e: self.logger.error('Unable", "{ unit_of_work.TIMEPERIOD: {'$gte': timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else None}", "= QUALIFIER_HOURLY process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since) if process_specific_since <= uow.start_timeperiod: candidates.append(uow) if len(candidates)", "LookupError('MongoDB has no reprocessing candidates units of work') return candidates @thread_safe def get_by_params(self,", "process continue time_qualifier = context.process_context[uow.process_name].time_qualifier if time_qualifier == QUALIFIER_REAL_TIME: time_qualifier = QUALIFIER_HOURLY process_specific_since", "% e.message, exc_info=True) else: msg = 'Unable to recover from DuplicateKeyError due to", "ds_manager.ds_factory(logger) @thread_safe def get_one(self, key): \"\"\" method finds unit_of_work record and returns it", "of work into MongoDB. :raises DuplicateKeyError: if such record already exist \"\"\" assert", "work into MongoDB. 
:raises DuplicateKeyError: if such record already exist \"\"\" assert isinstance(instance,", "from synergy.db.manager import ds_manager QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only: { unit_of_work.TIMEPERIOD: {'$gte': timeperiod},", "to recover from DuplicateKeyError error due to %s' % e.message, exc_info=True) else: msg", "logger): super(UnitOfWorkDao, self).__init__() self.logger = logger self.lock = RLock() self.ds = ds_manager.ds_factory(logger) @thread_safe", "unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if", "who could be candidates for re-processing \"\"\" collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) query = {unit_of_work.STATE:", "a unit of work into MongoDB. :raises DuplicateKeyError: if such record already exist", "None: msg = 'Unit_of_work with ID=%s was not found' % str(key) self.logger.warn(msg) raise", "unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else None} } class UnitOfWorkDao(object): \"\"\" Thread-safe", "None: cursor = collection.find(query).sort('_id', ASCENDING) candidates = [UnitOfWork.from_json(document) for document in cursor] else:", "cursor] else: candidates = [] yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod}", "cursor = collection.find(query).sort('_id', ASCENDING) for document in cursor: uow = UnitOfWork.from_json(document) if uow.process_name", "import RLock from bson.objectid import ObjectId from pymongo import ASCENDING from pymongo.errors import", "from synergy.conf import context from synergy.db.error import DuplicateKeyError from synergy.db.model import unit_of_work from", "unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED} if 
since is None: cursor = collection.find(query).sort('_id', ASCENDING) candidates", "document = collection.find_one(query) if document is None: raise LookupError('Unit_of_work satisfying query %r was", "else: msg = 'Unable to recover from DuplicateKeyError due to unspecified unit_of_work primary", "it to the caller\"\"\" query = {unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID:", "\"\"\" method finds unit_of_work record and returns it to the caller\"\"\" if not", "since=None): \"\"\" method queries Unit Of Work whose <start_timeperiod> is younger than <since>", "query = {unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)", "recover_from_duplicatekeyerror(self, e): \"\"\" method tries to recover from DuplicateKeyError \"\"\" if isinstance(e, DuplicateKeyError):", "self.ds = ds_manager.ds_factory(logger) @thread_safe def get_one(self, key): \"\"\" method finds unit_of_work record and", "= ds_manager.ds_factory(logger) @thread_safe def get_one(self, key): \"\"\" method finds unit_of_work record and returns", "work') return candidates @thread_safe def get_by_params(self, process_name, timeperiod, start_obj_id, end_obj_id): \"\"\" method finds", "= collection.save(document, safe=True) return instance.db_id @thread_safe def insert(self, instance): \"\"\" inserts a unit", "change its status\"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = instance.document if", "return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id) except LookupError as e: self.logger.error('Unable to recover from", "# cast key to ObjectId key = ObjectId(key) query = {'_id': key} collection", "already exist \"\"\" assert isinstance(instance, UnitOfWork) collection = 
self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return collection.insert(instance.document, safe=True)", "unit_of_work.END_OBJ_ID: end_obj_id} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None: raise", "query) return UnitOfWork.from_json(document) @thread_safe def update(self, instance): \"\"\" method finds unit_of_work record and", "if len(candidates) == 0: raise LookupError('MongoDB has no reprocessing candidates units of work')", "candidates for re-processing \"\"\" collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID,", "def get_one(self, key): \"\"\" method finds unit_of_work record and returns it to the", "query): \"\"\" method runs the query and returns a list of filtered UnitOfWork", "= self.ds.connection(COLLECTION_UNIT_OF_WORK) document = instance.document if instance.db_id: document['_id'] = ObjectId(instance.db_id) instance.db_id = collection.save(document,", "{'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else None} } class UnitOfWorkDao(object): \"\"\" Thread-safe Data Access", "from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError from synergy.system import time_helper from synergy.system.time_qualifier import", "ASCENDING) for document in cursor: uow = UnitOfWork.from_json(document) if uow.process_name not in context.process_context:", "* from synergy.system.decorator import thread_safe from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from synergy.conf import", "@thread_safe def get_reprocessing_candidates(self, since=None): \"\"\" method queries Unit Of Work whose <start_timeperiod> is", "returns it to the caller\"\"\" query = {unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID: start_obj_id,", "None} } class UnitOfWorkDao(object): \"\"\" Thread-safe Data Access 
Object from units_of_work table/collection \"\"\"", "for document in cursor] else: candidates = [] yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD]", "satisfying query %r was not found' % query) return UnitOfWork.from_json(document) @thread_safe def update(self,", "= self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document in cursor] def recover_from_duplicatekeyerror(self, e): \"\"\"", "DuplicateKeyError \"\"\" if isinstance(e, DuplicateKeyError): try: return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id) except LookupError", "DuplicateKeyError): try: return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id) except LookupError as e: self.logger.error('Unable to", "collection.remove(uow_id, safe=True) @thread_safe def run_query(self, query): \"\"\" method runs the query and returns", "self.logger = logger self.lock = RLock() self.ds = ds_manager.ds_factory(logger) @thread_safe def get_one(self, key):", "get_reprocessing_candidates(self, since=None): \"\"\" method queries Unit Of Work whose <start_timeperiod> is younger than", "key): \"\"\" method finds unit_of_work record and returns it to the caller\"\"\" if", "record already exist \"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return collection.insert(instance.document,", "found' % str(key) self.logger.warn(msg) raise LookupError(msg) return UnitOfWork.from_json(document) @thread_safe def get_reprocessing_candidates(self, since=None): \"\"\"", "document is None: msg = 'Unit_of_work with ID=%s was not found' % str(key)", "collection.find(query).sort('_id', ASCENDING) for document in cursor: uow = UnitOfWork.from_json(document) if uow.process_name not in", "assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return collection.insert(instance.document, 
safe=True) except MongoDuplicateKeyError as", "= ObjectId(instance.db_id) instance.db_id = collection.save(document, safe=True) return instance.db_id @thread_safe def insert(self, instance): \"\"\"", "runs the query and returns a list of filtered UnitOfWork records \"\"\" cursor", "import context from synergy.db.error import DuplicateKeyError from synergy.db.model import unit_of_work from synergy.db.model.unit_of_work import", "== QUALIFIER_REAL_TIME: time_qualifier = QUALIFIER_HOURLY process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since) if process_specific_since <= uow.start_timeperiod:", "query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod} cursor = collection.find(query).sort('_id', ASCENDING) for document in cursor: uow", "method finds unit_of_work record and change its status\"\"\" assert isinstance(instance, UnitOfWork) collection =", "process_name, timeperiod, start_obj_id, end_obj_id): \"\"\" method finds unit_of_work record and returns it to", "except LookupError as e: self.logger.error('Unable to recover from DuplicateKeyError error due to %s'", "thread_safe from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from synergy.conf import context from synergy.db.error import", "[UnitOfWork.from_json(document) for document in cursor] else: candidates = [] yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since)", "e.timeperiod, e.start_id, e.end_id) except LookupError as e: self.logger.error('Unable to recover from DuplicateKeyError error", "ObjectId): # cast key to ObjectId key = ObjectId(key) query = {'_id': key}", "e): \"\"\" method tries to recover from DuplicateKeyError \"\"\" if isinstance(e, DuplicateKeyError): try:", "in cursor] else: candidates = [] yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD] = {'$gte':", "isinstance(key, ObjectId): # cast key to ObjectId key = ObjectId(key) query 
= {'_id':", "query) return [UnitOfWork.from_json(document) for document in cursor] def recover_from_duplicatekeyerror(self, e): \"\"\" method tries", "if since is None: cursor = collection.find(query).sort('_id', ASCENDING) candidates = [UnitOfWork.from_json(document) for document", "safe=True) @thread_safe def run_query(self, query): \"\"\" method runs the query and returns a", "instance.db_id @thread_safe def insert(self, instance): \"\"\" inserts a unit of work into MongoDB.", "method runs the query and returns a list of filtered UnitOfWork records \"\"\"", "[UnitOfWork.from_json(document) for document in cursor] def recover_from_duplicatekeyerror(self, e): \"\"\" method tries to recover", "RLock from bson.objectid import ObjectId from pymongo import ASCENDING from pymongo.errors import DuplicateKeyError", "inserts a unit of work into MongoDB. :raises DuplicateKeyError: if such record already", "uow.process_name not in context.process_context: # this is a decommissioned process continue time_qualifier =", "'<NAME>' from threading import RLock from bson.objectid import ObjectId from pymongo import ASCENDING", "QUALIFIER_REAL_TIME: time_qualifier = QUALIFIER_HOURLY process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since) if process_specific_since <= uow.start_timeperiod: candidates.append(uow)", "self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None: raise LookupError('Unit_of_work satisfying query %r", "len(candidates) == 0: raise LookupError('MongoDB has no reprocessing candidates units of work') return", "synergy.system.decorator import thread_safe from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from synergy.conf import context from", "self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id, safe=True) @thread_safe def run_query(self, query): \"\"\" method runs the query", "def get_by_params(self, process_name, timeperiod, 
start_obj_id, end_obj_id): \"\"\" method finds unit_of_work record and returns", "instance): \"\"\" inserts a unit of work into MongoDB. :raises DuplicateKeyError: if such", "and returns it to the caller\"\"\" query = {unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID:", "TYPE_MANAGED from synergy.conf import context from synergy.db.error import DuplicateKeyError from synergy.db.model import unit_of_work", "be candidates for re-processing \"\"\" collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS,", "document['_id'] = ObjectId(instance.db_id) instance.db_id = collection.save(document, safe=True) return instance.db_id @thread_safe def insert(self, instance):", "if unprocessed_only else None} } class UnitOfWorkDao(object): \"\"\" Thread-safe Data Access Object from", "unit_of_work.TIMEPERIOD: {'$gte': timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else None} }", "synergy.db.model.unit_of_work import UnitOfWork from synergy.db.manager import ds_manager QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only: {", "__init__(self, logger): super(UnitOfWorkDao, self).__init__() self.logger = logger self.lock = RLock() self.ds = ds_manager.ds_factory(logger)", "return collection.remove(uow_id, safe=True) @thread_safe def run_query(self, query): \"\"\" method runs the query and", "tries to recover from DuplicateKeyError \"\"\" if isinstance(e, DuplicateKeyError): try: return self.get_by_params(e.process_name, e.timeperiod,", "a list of filtered UnitOfWork records \"\"\" cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document)", "ASCENDING from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError from synergy.system import time_helper from synergy.system.time_qualifier", "uow = 
UnitOfWork.from_json(document) if uow.process_name not in context.process_context: # this is a decommissioned", "instance.document if instance.db_id: document['_id'] = ObjectId(instance.db_id) instance.db_id = collection.save(document, safe=True) return instance.db_id @thread_safe", "= [UnitOfWork.from_json(document) for document in cursor] else: candidates = [] yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY,", "document = collection.find_one(query) if document is None: msg = 'Unit_of_work with ID=%s was", "def run_query(self, query): \"\"\" method runs the query and returns a list of", "document in cursor] else: candidates = [] yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD] =", "return collection.insert(instance.document, safe=True) except MongoDuplicateKeyError as e: exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id, instance.end_id,", "instance.start_timeperiod, instance.start_id, instance.end_id, e) raise exc @thread_safe def remove(self, uow_id): assert isinstance(uow_id, (str,", "import * from synergy.system.decorator import thread_safe from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from synergy.conf", "if time_qualifier == QUALIFIER_REAL_TIME: time_qualifier = QUALIFIER_HOURLY process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since) if process_specific_since", "safe=True) except MongoDuplicateKeyError as e: exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id, instance.end_id, e) raise", "UnitOfWork.from_json(document) @thread_safe def update(self, instance): \"\"\" method finds unit_of_work record and change its", "cursor: uow = UnitOfWork.from_json(document) if uow.process_name not in context.process_context: # this is a", "@thread_safe def remove(self, uow_id): assert isinstance(uow_id, (str, ObjectId)) collection = 
self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id,", "{'$gte': yearly_timeperiod} cursor = collection.find(query).sort('_id', ASCENDING) for document in cursor: uow = UnitOfWork.from_json(document)", "import ObjectId from pymongo import ASCENDING from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError from", "e: exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id, instance.end_id, e) raise exc @thread_safe def remove(self,", "@thread_safe def get_by_params(self, process_name, timeperiod, start_obj_id, end_obj_id): \"\"\" method finds unit_of_work record and", "\"\"\" method finds unit_of_work record and change its status\"\"\" assert isinstance(instance, UnitOfWork) collection", "filtered UnitOfWork records \"\"\" cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document in", "get_by_params(self, process_name, timeperiod, start_obj_id, end_obj_id): \"\"\" method finds unit_of_work record and returns it", "as MongoDuplicateKeyError from synergy.system import time_helper from synergy.system.time_qualifier import * from synergy.system.decorator import", "\"\"\" method queries Unit Of Work whose <start_timeperiod> is younger than <since> and", "return instance.db_id @thread_safe def insert(self, instance): \"\"\" inserts a unit of work into", "its status\"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = instance.document if instance.db_id:", "exc @thread_safe def remove(self, uow_id): assert isinstance(uow_id, (str, ObjectId)) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) return", "ObjectId from pymongo import ASCENDING from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError from synergy.system", "synergy.db.manager import ds_manager QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only: { unit_of_work.TIMEPERIOD: {'$gte': timeperiod}, 
unit_of_work.UNIT_OF_WORK_TYPE:", "than <since> and who could be candidates for re-processing \"\"\" collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)", "since is None: cursor = collection.find(query).sort('_id', ASCENDING) candidates = [UnitOfWork.from_json(document) for document in", "ObjectId(key) query = {'_id': key} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document", "could be candidates for re-processing \"\"\" collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) query = {unit_of_work.STATE: {'$in':", "is younger than <since> and who could be candidates for re-processing \"\"\" collection", "DuplicateKeyError error due to %s' % e.message, exc_info=True) else: msg = 'Unable to", "context from synergy.db.error import DuplicateKeyError from synergy.db.model import unit_of_work from synergy.db.model.unit_of_work import UnitOfWork", "not found' % str(key) self.logger.warn(msg) raise LookupError(msg) return UnitOfWork.from_json(document) @thread_safe def get_reprocessing_candidates(self, since=None):", "isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = instance.document if instance.db_id: document['_id'] = ObjectId(instance.db_id)", "returns a list of filtered UnitOfWork records \"\"\" cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return", "%r was not found' % query) return UnitOfWork.from_json(document) @thread_safe def update(self, instance): \"\"\"", "LookupError('Unit_of_work satisfying query %r was not found' % query) return UnitOfWork.from_json(document) @thread_safe def", "unit_of_work record and returns it to the caller\"\"\" if not isinstance(key, ObjectId): #", "<= uow.start_timeperiod: candidates.append(uow) if len(candidates) == 0: raise LookupError('MongoDB has no reprocessing candidates", "MongoDuplicateKeyError as e: exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id, 
instance.end_id, e) raise exc @thread_safe", "unit_of_work.STATE_PROCESSED if unprocessed_only else None} } class UnitOfWorkDao(object): \"\"\" Thread-safe Data Access Object", "from synergy.db.model import unit_of_work from synergy.db.model.unit_of_work import UnitOfWork from synergy.db.manager import ds_manager QUERY_GET_FREERUN_SINCE", "run_query(self, query): \"\"\" method runs the query and returns a list of filtered", "0: raise LookupError('MongoDB has no reprocessing candidates units of work') return candidates @thread_safe", "method finds unit_of_work record and returns it to the caller\"\"\" if not isinstance(key,", "UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return collection.insert(instance.document, safe=True) except MongoDuplicateKeyError as e: exc", "error due to %s' % e.message, exc_info=True) else: msg = 'Unable to recover", "into MongoDB. :raises DuplicateKeyError: if such record already exist \"\"\" assert isinstance(instance, UnitOfWork)", "is a decommissioned process continue time_qualifier = context.process_context[uow.process_name].time_qualifier if time_qualifier == QUALIFIER_REAL_TIME: time_qualifier", "self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id) except LookupError as e: self.logger.error('Unable to recover from DuplicateKeyError", "pymongo import ASCENDING from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError from synergy.system import time_helper", "candidates = [] yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod} cursor =", "document in cursor: uow = UnitOfWork.from_json(document) if uow.process_name not in context.process_context: # this", "'Unit_of_work with ID=%s was not found' % str(key) self.logger.warn(msg) raise LookupError(msg) return UnitOfWork.from_json(document)", "recover from DuplicateKeyError error due to %s' % e.message, exc_info=True) else: msg =", 
"yearly_timeperiod} cursor = collection.find(query).sort('_id', ASCENDING) for document in cursor: uow = UnitOfWork.from_json(document) if", "insert(self, instance): \"\"\" inserts a unit of work into MongoDB. :raises DuplicateKeyError: if", "Thread-safe Data Access Object from units_of_work table/collection \"\"\" def __init__(self, logger): super(UnitOfWorkDao, self).__init__()", "collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = instance.document if instance.db_id: document['_id'] = ObjectId(instance.db_id) instance.db_id =", "def remove(self, uow_id): assert isinstance(uow_id, (str, ObjectId)) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id, safe=True)", "LookupError(msg) return UnitOfWork.from_json(document) @thread_safe def get_reprocessing_candidates(self, since=None): \"\"\" method queries Unit Of Work", "\"\"\" inserts a unit of work into MongoDB. :raises DuplicateKeyError: if such record", "candidates @thread_safe def get_by_params(self, process_name, timeperiod, start_obj_id, end_obj_id): \"\"\" method finds unit_of_work record", "from DuplicateKeyError error due to %s' % e.message, exc_info=True) else: msg = 'Unable", "= {unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document", "import DuplicateKeyError from synergy.db.model import unit_of_work from synergy.db.model.unit_of_work import UnitOfWork from synergy.db.manager import", "{'$gte': timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else None} } class", "= self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None: msg = 'Unit_of_work with", "this is a decommissioned process continue time_qualifier = 
context.process_context[uow.process_name].time_qualifier if time_qualifier == QUALIFIER_REAL_TIME:", "UnitOfWork.from_json(document) if uow.process_name not in context.process_context: # this is a decommissioned process continue", "= 'Unable to recover from DuplicateKeyError due to unspecified unit_of_work primary key' self.logger.error(msg)", "def __init__(self, logger): super(UnitOfWorkDao, self).__init__() self.logger = logger self.lock = RLock() self.ds =", "to the caller\"\"\" query = {unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id}", "import UnitOfWork from synergy.db.manager import ds_manager QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only: { unit_of_work.TIMEPERIOD:", "self.logger.warn(msg) raise LookupError(msg) return UnitOfWork.from_json(document) @thread_safe def get_reprocessing_candidates(self, since=None): \"\"\" method queries Unit", "exc_info=True) else: msg = 'Unable to recover from DuplicateKeyError due to unspecified unit_of_work", "QUALIFIER_HOURLY process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since) if process_specific_since <= uow.start_timeperiod: candidates.append(uow) if len(candidates) ==", "remove(self, uow_id): assert isinstance(uow_id, (str, ObjectId)) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id, safe=True) @thread_safe", "@thread_safe def insert(self, instance): \"\"\" inserts a unit of work into MongoDB. 
:raises", "try: return collection.insert(instance.document, safe=True) except MongoDuplicateKeyError as e: exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id,", "time_qualifier = context.process_context[uow.process_name].time_qualifier if time_qualifier == QUALIFIER_REAL_TIME: time_qualifier = QUALIFIER_HOURLY process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier,", "raise exc @thread_safe def remove(self, uow_id): assert isinstance(uow_id, (str, ObjectId)) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)", "end_obj_id): \"\"\" method finds unit_of_work record and returns it to the caller\"\"\" query", "not isinstance(key, ObjectId): # cast key to ObjectId key = ObjectId(key) query =", "self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None: msg = 'Unit_of_work with ID=%s", "QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only: { unit_of_work.TIMEPERIOD: {'$gte': timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne':", "process_name, unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query)", "was not found' % query) return UnitOfWork.from_json(document) @thread_safe def update(self, instance): \"\"\" method", "= self.ds.connection(COLLECTION_UNIT_OF_WORK) query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED} if since", "has no reprocessing candidates units of work') return candidates @thread_safe def get_by_params(self, process_name,", "instance.end_id, e) raise exc @thread_safe def remove(self, uow_id): assert isinstance(uow_id, (str, ObjectId)) collection", "@thread_safe def update(self, instance): \"\"\" method finds 
unit_of_work record and change its status\"\"\"", "[] yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod} cursor = collection.find(query).sort('_id', ASCENDING)", "record and change its status\"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document =", "as e: self.logger.error('Unable to recover from DuplicateKeyError error due to %s' % e.message,", "ObjectId(instance.db_id) instance.db_id = collection.save(document, safe=True) return instance.db_id @thread_safe def insert(self, instance): \"\"\" inserts", "candidates = [UnitOfWork.from_json(document) for document in cursor] else: candidates = [] yearly_timeperiod =", "\"\"\" if isinstance(e, DuplicateKeyError): try: return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id) except LookupError as", "candidates.append(uow) if len(candidates) == 0: raise LookupError('MongoDB has no reprocessing candidates units of", "= collection.find(query).sort('_id', ASCENDING) candidates = [UnitOfWork.from_json(document) for document in cursor] else: candidates =", "<start_timeperiod> is younger than <since> and who could be candidates for re-processing \"\"\"", "from pymongo import ASCENDING from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError from synergy.system import", "super(UnitOfWorkDao, self).__init__() self.logger = logger self.lock = RLock() self.ds = ds_manager.ds_factory(logger) @thread_safe def", "ObjectId key = ObjectId(key) query = {'_id': key} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document =", "Access Object from units_of_work table/collection \"\"\" def __init__(self, logger): super(UnitOfWorkDao, self).__init__() self.logger =", "if isinstance(e, DuplicateKeyError): try: return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id) except LookupError as e:", "in cursor: uow = 
UnitOfWork.from_json(document) if uow.process_name not in context.process_context: # this is", "\"\"\" collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED}", "= [] yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod} cursor = collection.find(query).sort('_id',", "collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None: raise LookupError('Unit_of_work satisfying", "= ObjectId(key) query = {'_id': key} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if", "unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else None} } class UnitOfWorkDao(object): \"\"\" Thread-safe Data", "return candidates @thread_safe def get_by_params(self, process_name, timeperiod, start_obj_id, end_obj_id): \"\"\" method finds unit_of_work", "import ASCENDING from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError from synergy.system import time_helper from", "younger than <since> and who could be candidates for re-processing \"\"\" collection =", "return UnitOfWork.from_json(document) @thread_safe def get_reprocessing_candidates(self, since=None): \"\"\" method queries Unit Of Work whose", "raise LookupError('Unit_of_work satisfying query %r was not found' % query) return UnitOfWork.from_json(document) @thread_safe", "UnitOfWorkDao(object): \"\"\" Thread-safe Data Access Object from units_of_work table/collection \"\"\" def __init__(self, logger):", "= UnitOfWork.from_json(document) if uow.process_name not in context.process_context: # this is a decommissioned process", "Data Access Object from units_of_work table/collection \"\"\" def __init__(self, logger): super(UnitOfWorkDao, 
self).__init__() self.logger", "instance.db_id = collection.save(document, safe=True) return instance.db_id @thread_safe def insert(self, instance): \"\"\" inserts a", "is None: raise LookupError('Unit_of_work satisfying query %r was not found' % query) return", "table/collection \"\"\" def __init__(self, logger): super(UnitOfWorkDao, self).__init__() self.logger = logger self.lock = RLock()", "document in cursor] def recover_from_duplicatekeyerror(self, e): \"\"\" method tries to recover from DuplicateKeyError", "uow_id): assert isinstance(uow_id, (str, ObjectId)) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id, safe=True) @thread_safe def", "%s' % e.message, exc_info=True) else: msg = 'Unable to recover from DuplicateKeyError due", "if document is None: raise LookupError('Unit_of_work satisfying query %r was not found' %", "a decommissioned process continue time_qualifier = context.process_context[uow.process_name].time_qualifier if time_qualifier == QUALIFIER_REAL_TIME: time_qualifier =", "e) raise exc @thread_safe def remove(self, uow_id): assert isinstance(uow_id, (str, ObjectId)) collection =", "= self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return collection.insert(instance.document, safe=True) except MongoDuplicateKeyError as e: exc = DuplicateKeyError(instance.process_name,", "unprocessed_only else None} } class UnitOfWorkDao(object): \"\"\" Thread-safe Data Access Object from units_of_work", "such record already exist \"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return", "Work whose <start_timeperiod> is younger than <since> and who could be candidates for", "collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id, safe=True) @thread_safe def run_query(self, query): \"\"\" method runs", "DuplicateKeyError: if such record already exist \"\"\" assert isinstance(instance, UnitOfWork) collection = 
self.ds.connection(COLLECTION_UNIT_OF_WORK)", "candidates units of work') return candidates @thread_safe def get_by_params(self, process_name, timeperiod, start_obj_id, end_obj_id):", "collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED} if", "self.lock = RLock() self.ds = ds_manager.ds_factory(logger) @thread_safe def get_one(self, key): \"\"\" method finds", "collection.insert(instance.document, safe=True) except MongoDuplicateKeyError as e: exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id, instance.end_id, e)", "unit_of_work record and returns it to the caller\"\"\" query = {unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD:", "\"\"\" method tries to recover from DuplicateKeyError \"\"\" if isinstance(e, DuplicateKeyError): try: return", "to %s' % e.message, exc_info=True) else: msg = 'Unable to recover from DuplicateKeyError", "= self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None: raise LookupError('Unit_of_work satisfying query", "def update(self, instance): \"\"\" method finds unit_of_work record and change its status\"\"\" assert", "DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id, instance.end_id, e) raise exc @thread_safe def remove(self, uow_id): assert isinstance(uow_id,", "= collection.find_one(query) if document is None: raise LookupError('Unit_of_work satisfying query %r was not", "timeperiod, unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document", "finds unit_of_work record and returns it to the caller\"\"\" query = {unit_of_work.PROCESS_NAME: process_name,", "query = {'_id': key} collection = 
self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is", "assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = instance.document if instance.db_id: document['_id'] =", "records \"\"\" cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document in cursor] def", "whose <start_timeperiod> is younger than <since> and who could be candidates for re-processing", "if such record already exist \"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) try:", "due to %s' % e.message, exc_info=True) else: msg = 'Unable to recover from", "and who could be candidates for re-processing \"\"\" collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) query =", "record and returns it to the caller\"\"\" if not isinstance(key, ObjectId): # cast", "re-processing \"\"\" collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE:", "\"\"\" method runs the query and returns a list of filtered UnitOfWork records", "time_helper.cast_to_time_qualifier(time_qualifier, since) if process_specific_since <= uow.start_timeperiod: candidates.append(uow) if len(candidates) == 0: raise LookupError('MongoDB", "the caller\"\"\" query = {unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id} collection", "collection.find_one(query) if document is None: raise LookupError('Unit_of_work satisfying query %r was not found'", "= DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id, instance.end_id, e) raise exc @thread_safe def remove(self, uow_id): assert", "msg = 'Unit_of_work with ID=%s was not found' % str(key) 
self.logger.warn(msg) raise LookupError(msg)", "unprocessed_only: { unit_of_work.TIMEPERIOD: {'$gte': timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else", "continue time_qualifier = context.process_context[uow.process_name].time_qualifier if time_qualifier == QUALIFIER_REAL_TIME: time_qualifier = QUALIFIER_HOURLY process_specific_since =", "unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else None} } class UnitOfWorkDao(object): \"\"\"", "synergy.db.error import DuplicateKeyError from synergy.db.model import unit_of_work from synergy.db.model.unit_of_work import UnitOfWork from synergy.db.manager", "query and returns a list of filtered UnitOfWork records \"\"\" cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK,", "collection.find(query).sort('_id', ASCENDING) candidates = [UnitOfWork.from_json(document) for document in cursor] else: candidates = []", "DuplicateKeyError from synergy.db.model import unit_of_work from synergy.db.model.unit_of_work import UnitOfWork from synergy.db.manager import ds_manager", "it to the caller\"\"\" if not isinstance(key, ObjectId): # cast key to ObjectId", "unit_of_work record and change its status\"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document", "unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED} if since is None: cursor = collection.find(query).sort('_id', ASCENDING) candidates =", "from bson.objectid import ObjectId from pymongo import ASCENDING from pymongo.errors import DuplicateKeyError as", "= collection.find_one(query) if document is None: msg = 'Unit_of_work with ID=%s was not", "except MongoDuplicateKeyError as e: exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id, instance.end_id, e) raise exc", 
"instance.start_id, instance.end_id, e) raise exc @thread_safe def remove(self, uow_id): assert isinstance(uow_id, (str, ObjectId))", "ObjectId)) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id, safe=True) @thread_safe def run_query(self, query): \"\"\" method", "list of filtered UnitOfWork records \"\"\" cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for", "query %r was not found' % query) return UnitOfWork.from_json(document) @thread_safe def update(self, instance):", "collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None: msg = 'Unit_of_work", "query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED} if since is None:", "from synergy.system.time_qualifier import * from synergy.system.decorator import thread_safe from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED", "try: return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id) except LookupError as e: self.logger.error('Unable to recover", "LookupError as e: self.logger.error('Unable to recover from DuplicateKeyError error due to %s' %", "yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod} cursor = collection.find(query).sort('_id', ASCENDING) for", "to ObjectId key = ObjectId(key) query = {'_id': key} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document", "start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None:", "= {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: 
TYPE_MANAGED} if since is None: cursor", "\"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return collection.insert(instance.document, safe=True) except MongoDuplicateKeyError", "Object from units_of_work table/collection \"\"\" def __init__(self, logger): super(UnitOfWorkDao, self).__init__() self.logger = logger", "ASCENDING) candidates = [UnitOfWork.from_json(document) for document in cursor] else: candidates = [] yearly_timeperiod", "from threading import RLock from bson.objectid import ObjectId from pymongo import ASCENDING from", "unit_of_work from synergy.db.model.unit_of_work import UnitOfWork from synergy.db.manager import ds_manager QUERY_GET_FREERUN_SINCE = lambda timeperiod,", ":raises DuplicateKeyError: if such record already exist \"\"\" assert isinstance(instance, UnitOfWork) collection =", "and change its status\"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = instance.document", "cursor = collection.find(query).sort('_id', ASCENDING) candidates = [UnitOfWork.from_json(document) for document in cursor] else: candidates", "bson.objectid import ObjectId from pymongo import ASCENDING from pymongo.errors import DuplicateKeyError as MongoDuplicateKeyError", "from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from synergy.conf import context from synergy.db.error import DuplicateKeyError", "key to ObjectId key = ObjectId(key) query = {'_id': key} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)", "lambda timeperiod, unprocessed_only: { unit_of_work.TIMEPERIOD: {'$gte': timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if", "raise LookupError(msg) return UnitOfWork.from_json(document) @thread_safe def get_reprocessing_candidates(self, since=None): \"\"\" method queries Unit Of", "caller\"\"\" if not 
isinstance(key, ObjectId): # cast key to ObjectId key = ObjectId(key)", "since) query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod} cursor = collection.find(query).sort('_id', ASCENDING) for document in cursor:", "MongoDB. :raises DuplicateKeyError: if such record already exist \"\"\" assert isinstance(instance, UnitOfWork) collection", "self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document in cursor] def recover_from_duplicatekeyerror(self, e): \"\"\" method", "from DuplicateKeyError \"\"\" if isinstance(e, DuplicateKeyError): try: return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id) except", "timeperiod, unprocessed_only: { unit_of_work.TIMEPERIOD: {'$gte': timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only", "COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from synergy.conf import context from synergy.db.error import DuplicateKeyError from synergy.db.model import", "self.logger.error('Unable to recover from DuplicateKeyError error due to %s' % e.message, exc_info=True) else:", "exist \"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return collection.insert(instance.document, safe=True) except", "None: raise LookupError('Unit_of_work satisfying query %r was not found' % query) return UnitOfWork.from_json(document)", "safe=True) return instance.db_id @thread_safe def insert(self, instance): \"\"\" inserts a unit of work", "from units_of_work table/collection \"\"\" def __init__(self, logger): super(UnitOfWorkDao, self).__init__() self.logger = logger self.lock", "self).__init__() self.logger = logger self.lock = RLock() self.ds = ds_manager.ds_factory(logger) @thread_safe def get_one(self,", "context.process_context: # this is a decommissioned process continue time_qualifier = context.process_context[uow.process_name].time_qualifier if 
time_qualifier", "(str, ObjectId)) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id, safe=True) @thread_safe def run_query(self, query): \"\"\"", "end_obj_id} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None: raise LookupError('Unit_of_work", "document is None: raise LookupError('Unit_of_work satisfying query %r was not found' % query)", "TYPE_MANAGED} if since is None: cursor = collection.find(query).sort('_id', ASCENDING) candidates = [UnitOfWork.from_json(document) for", "= time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod} cursor = collection.find(query).sort('_id', ASCENDING) for document", "e: self.logger.error('Unable to recover from DuplicateKeyError error due to %s' % e.message, exc_info=True)", "import thread_safe from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from synergy.conf import context from synergy.db.error", "MongoDuplicateKeyError from synergy.system import time_helper from synergy.system.time_qualifier import * from synergy.system.decorator import thread_safe", "\"\"\" method finds unit_of_work record and returns it to the caller\"\"\" query =", "unit of work into MongoDB. 
:raises DuplicateKeyError: if such record already exist \"\"\"", "update(self, instance): \"\"\" method finds unit_of_work record and change its status\"\"\" assert isinstance(instance,", "logger self.lock = RLock() self.ds = ds_manager.ds_factory(logger) @thread_safe def get_one(self, key): \"\"\" method", "unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED} if since is None: cursor = collection.find(query).sort('_id', ASCENDING) candidates = [UnitOfWork.from_json(document)", "if uow.process_name not in context.process_context: # this is a decommissioned process continue time_qualifier", "str(key) self.logger.warn(msg) raise LookupError(msg) return UnitOfWork.from_json(document) @thread_safe def get_reprocessing_candidates(self, since=None): \"\"\" method queries", "def recover_from_duplicatekeyerror(self, e): \"\"\" method tries to recover from DuplicateKeyError \"\"\" if isinstance(e,", "Of Work whose <start_timeperiod> is younger than <since> and who could be candidates", "import unit_of_work from synergy.db.model.unit_of_work import UnitOfWork from synergy.db.manager import ds_manager QUERY_GET_FREERUN_SINCE = lambda", "time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod} cursor = collection.find(query).sort('_id', ASCENDING) for document in", "found' % query) return UnitOfWork.from_json(document) @thread_safe def update(self, instance): \"\"\" method finds unit_of_work", "of filtered UnitOfWork records \"\"\" cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document", "== 0: raise LookupError('MongoDB has no reprocessing candidates units of work') return candidates", "in cursor] def recover_from_duplicatekeyerror(self, e): \"\"\" method tries to recover from DuplicateKeyError \"\"\"", "to the caller\"\"\" if not isinstance(key, ObjectId): # cast key to ObjectId key", "= RLock() self.ds = ds_manager.ds_factory(logger) @thread_safe def 
get_one(self, key): \"\"\" method finds unit_of_work", "from synergy.system.decorator import thread_safe from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from synergy.conf import context", "if process_specific_since <= uow.start_timeperiod: candidates.append(uow) if len(candidates) == 0: raise LookupError('MongoDB has no", "finds unit_of_work record and returns it to the caller\"\"\" if not isinstance(key, ObjectId):", "@thread_safe def get_one(self, key): \"\"\" method finds unit_of_work record and returns it to", "self.ds.connection(COLLECTION_UNIT_OF_WORK) document = instance.document if instance.db_id: document['_id'] = ObjectId(instance.db_id) instance.db_id = collection.save(document, safe=True)", "timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED if unprocessed_only else None} } class UnitOfWorkDao(object):", "finds unit_of_work record and change its status\"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK)", "<since> and who could be candidates for re-processing \"\"\" collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) query", "timeperiod, start_obj_id, end_obj_id): \"\"\" method finds unit_of_work record and returns it to the", "time_qualifier = QUALIFIER_HOURLY process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since) if process_specific_since <= uow.start_timeperiod: candidates.append(uow) if", "DuplicateKeyError as MongoDuplicateKeyError from synergy.system import time_helper from synergy.system.time_qualifier import * from synergy.system.decorator", "if not isinstance(key, ObjectId): # cast key to ObjectId key = ObjectId(key) query", "isinstance(uow_id, (str, ObjectId)) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id, safe=True) @thread_safe def run_query(self, query):", "class UnitOfWorkDao(object): \"\"\" 
Thread-safe Data Access Object from units_of_work table/collection \"\"\" def __init__(self,", "\"\"\" cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document in cursor] def recover_from_duplicatekeyerror(self,", "threading import RLock from bson.objectid import ObjectId from pymongo import ASCENDING from pymongo.errors", "% str(key) self.logger.warn(msg) raise LookupError(msg) return UnitOfWork.from_json(document) @thread_safe def get_reprocessing_candidates(self, since=None): \"\"\" method", "in context.process_context: # this is a decommissioned process continue time_qualifier = context.process_context[uow.process_name].time_qualifier if", "# this is a decommissioned process continue time_qualifier = context.process_context[uow.process_name].time_qualifier if time_qualifier ==", "since) if process_specific_since <= uow.start_timeperiod: candidates.append(uow) if len(candidates) == 0: raise LookupError('MongoDB has", "else: candidates = [] yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, since) query[unit_of_work.START_TIMEPERIOD] = {'$gte': yearly_timeperiod} cursor", "ds_manager QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only: { unit_of_work.TIMEPERIOD: {'$gte': timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE:", "get_one(self, key): \"\"\" method finds unit_of_work record and returns it to the caller\"\"\"", "method tries to recover from DuplicateKeyError \"\"\" if isinstance(e, DuplicateKeyError): try: return self.get_by_params(e.process_name,", "reprocessing candidates units of work') return candidates @thread_safe def get_by_params(self, process_name, timeperiod, start_obj_id,", "= '<NAME>' from threading import RLock from bson.objectid import ObjectId from pymongo import", "decommissioned process continue time_qualifier = context.process_context[uow.process_name].time_qualifier if time_qualifier == QUALIFIER_REAL_TIME: 
time_qualifier = QUALIFIER_HOURLY", "and returns a list of filtered UnitOfWork records \"\"\" cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query)", "= time_helper.cast_to_time_qualifier(time_qualifier, since) if process_specific_since <= uow.start_timeperiod: candidates.append(uow) if len(candidates) == 0: raise", "} class UnitOfWorkDao(object): \"\"\" Thread-safe Data Access Object from units_of_work table/collection \"\"\" def", "units_of_work table/collection \"\"\" def __init__(self, logger): super(UnitOfWorkDao, self).__init__() self.logger = logger self.lock =", "e.end_id) except LookupError as e: self.logger.error('Unable to recover from DuplicateKeyError error due to", "def insert(self, instance): \"\"\" inserts a unit of work into MongoDB. :raises DuplicateKeyError:", "time_helper from synergy.system.time_qualifier import * from synergy.system.decorator import thread_safe from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK,", "instance.db_id: document['_id'] = ObjectId(instance.db_id) instance.db_id = collection.save(document, safe=True) return instance.db_id @thread_safe def insert(self,", "to recover from DuplicateKeyError \"\"\" if isinstance(e, DuplicateKeyError): try: return self.get_by_params(e.process_name, e.timeperiod, e.start_id,", "of work') return candidates @thread_safe def get_by_params(self, process_name, timeperiod, start_obj_id, end_obj_id): \"\"\" method", "synergy.db.model import unit_of_work from synergy.db.model.unit_of_work import UnitOfWork from synergy.db.manager import ds_manager QUERY_GET_FREERUN_SINCE =", "= self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id, safe=True) @thread_safe def run_query(self, query): \"\"\" method runs the", "= instance.document if instance.db_id: document['_id'] = ObjectId(instance.db_id) instance.db_id = collection.save(document, safe=True) return instance.db_id", "key = ObjectId(key) query = {'_id': key} collection = 
self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query)", "UnitOfWork from synergy.db.manager import ds_manager QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only: { unit_of_work.TIMEPERIOD: {'$gte':", "@thread_safe def run_query(self, query): \"\"\" method runs the query and returns a list", "context.process_context[uow.process_name].time_qualifier if time_qualifier == QUALIFIER_REAL_TIME: time_qualifier = QUALIFIER_HOURLY process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since) if", "synergy.conf import context from synergy.db.error import DuplicateKeyError from synergy.db.model import unit_of_work from synergy.db.model.unit_of_work", "raise LookupError('MongoDB has no reprocessing candidates units of work') return candidates @thread_safe def", "record and returns it to the caller\"\"\" query = {unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD: timeperiod,", "from synergy.db.model.unit_of_work import UnitOfWork from synergy.db.manager import ds_manager QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only:", "isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return collection.insert(instance.document, safe=True) except MongoDuplicateKeyError as e:", "import ds_manager QUERY_GET_FREERUN_SINCE = lambda timeperiod, unprocessed_only: { unit_of_work.TIMEPERIOD: {'$gte': timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN,", "is None: msg = 'Unit_of_work with ID=%s was not found' % str(key) self.logger.warn(msg)", "if document is None: msg = 'Unit_of_work with ID=%s was not found' %", "status\"\"\" assert isinstance(instance, UnitOfWork) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = instance.document if instance.db_id: document['_id']", "= collection.find(query).sort('_id', ASCENDING) for document in cursor: uow = UnitOfWork.from_json(document) if uow.process_name not", 
"UnitOfWork.from_json(document) @thread_safe def get_reprocessing_candidates(self, since=None): \"\"\" method queries Unit Of Work whose <start_timeperiod>", "import time_helper from synergy.system.time_qualifier import * from synergy.system.decorator import thread_safe from synergy.scheduler.scheduler_constants import", "def get_reprocessing_candidates(self, since=None): \"\"\" method queries Unit Of Work whose <start_timeperiod> is younger", "for re-processing \"\"\" collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]},", "returns it to the caller\"\"\" if not isinstance(key, ObjectId): # cast key to", "cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document in cursor] def recover_from_duplicatekeyerror(self, e):", "cast key to ObjectId key = ObjectId(key) query = {'_id': key} collection =", "import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from synergy.conf import context from synergy.db.error import DuplicateKeyError from synergy.db.model", "document = instance.document if instance.db_id: document['_id'] = ObjectId(instance.db_id) instance.db_id = collection.save(document, safe=True) return", "return UnitOfWork.from_json(document) @thread_safe def update(self, instance): \"\"\" method finds unit_of_work record and change", "queries Unit Of Work whose <start_timeperiod> is younger than <since> and who could", "no reprocessing candidates units of work') return candidates @thread_safe def get_by_params(self, process_name, timeperiod,", "\"\"\" def __init__(self, logger): super(UnitOfWorkDao, self).__init__() self.logger = logger self.lock = RLock() self.ds", "cursor] def recover_from_duplicatekeyerror(self, e): \"\"\" method tries to recover from DuplicateKeyError \"\"\" if", "exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod, instance.start_id, instance.end_id, 
e) raise exc @thread_safe def remove(self, uow_id):", "= 'Unit_of_work with ID=%s was not found' % str(key) self.logger.warn(msg) raise LookupError(msg) return", "= context.process_context[uow.process_name].time_qualifier if time_qualifier == QUALIFIER_REAL_TIME: time_qualifier = QUALIFIER_HOURLY process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since)", "unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is", "UnitOfWork records \"\"\" cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query) return [UnitOfWork.from_json(document) for document in cursor]", "msg = 'Unable to recover from DuplicateKeyError due to unspecified unit_of_work primary key'", "caller\"\"\" query = {unit_of_work.PROCESS_NAME: process_name, unit_of_work.TIMEPERIOD: timeperiod, unit_of_work.START_OBJ_ID: start_obj_id, unit_of_work.END_OBJ_ID: end_obj_id} collection =", "not in context.process_context: # this is a decommissioned process continue time_qualifier = context.process_context[uow.process_name].time_qualifier", "% query) return UnitOfWork.from_json(document) @thread_safe def update(self, instance): \"\"\" method finds unit_of_work record", "collection.find_one(query) if document is None: msg = 'Unit_of_work with ID=%s was not found'", "not found' % query) return UnitOfWork.from_json(document) @thread_safe def update(self, instance): \"\"\" method finds", "the query and returns a list of filtered UnitOfWork records \"\"\" cursor =", "Unit Of Work whose <start_timeperiod> is younger than <since> and who could be", "collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return collection.insert(instance.document, safe=True) except MongoDuplicateKeyError as e: exc =", "{unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: 
TYPE_MANAGED} if since is None: cursor =", "e.message, exc_info=True) else: msg = 'Unable to recover from DuplicateKeyError due to unspecified", "with ID=%s was not found' % str(key) self.logger.warn(msg) raise LookupError(msg) return UnitOfWork.from_json(document) @thread_safe", "else None} } class UnitOfWorkDao(object): \"\"\" Thread-safe Data Access Object from units_of_work table/collection", "self.ds.connection(COLLECTION_UNIT_OF_WORK) try: return collection.insert(instance.document, safe=True) except MongoDuplicateKeyError as e: exc = DuplicateKeyError(instance.process_name, instance.start_timeperiod,", "for document in cursor] def recover_from_duplicatekeyerror(self, e): \"\"\" method tries to recover from", "and returns it to the caller\"\"\" if not isinstance(key, ObjectId): # cast key", "uow.start_timeperiod: candidates.append(uow) if len(candidates) == 0: raise LookupError('MongoDB has no reprocessing candidates units", "the caller\"\"\" if not isinstance(key, ObjectId): # cast key to ObjectId key =", "was not found' % str(key) self.logger.warn(msg) raise LookupError(msg) return UnitOfWork.from_json(document) @thread_safe def get_reprocessing_candidates(self,", "for document in cursor: uow = UnitOfWork.from_json(document) if uow.process_name not in context.process_context: #", "self.ds.connection(COLLECTION_UNIT_OF_WORK) query = {unit_of_work.STATE: {'$in': [unit_of_work.STATE_IN_PROGRESS, unit_of_work.STATE_INVALID, unit_of_work.STATE_REQUESTED]}, unit_of_work.UNIT_OF_WORK_TYPE: TYPE_MANAGED} if since is", "e.start_id, e.end_id) except LookupError as e: self.logger.error('Unable to recover from DuplicateKeyError error due", "= lambda timeperiod, unprocessed_only: { unit_of_work.TIMEPERIOD: {'$gte': timeperiod}, unit_of_work.UNIT_OF_WORK_TYPE: unit_of_work.TYPE_FREERUN, unit_of_work.STATE: {'$ne': unit_of_work.STATE_PROCESSED", "process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since) if process_specific_since <= 
uow.start_timeperiod: candidates.append(uow) if len(candidates) == 0:", "= {'_id': key} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None:", "units of work') return candidates @thread_safe def get_by_params(self, process_name, timeperiod, start_obj_id, end_obj_id): \"\"\"", "synergy.system import time_helper from synergy.system.time_qualifier import * from synergy.system.decorator import thread_safe from synergy.scheduler.scheduler_constants", "= logger self.lock = RLock() self.ds = ds_manager.ds_factory(logger) @thread_safe def get_one(self, key): \"\"\"", "process_specific_since <= uow.start_timeperiod: candidates.append(uow) if len(candidates) == 0: raise LookupError('MongoDB has no reprocessing", "assert isinstance(uow_id, (str, ObjectId)) collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) return collection.remove(uow_id, safe=True) @thread_safe def run_query(self,", "time_qualifier == QUALIFIER_REAL_TIME: time_qualifier = QUALIFIER_HOURLY process_specific_since = time_helper.cast_to_time_qualifier(time_qualifier, since) if process_specific_since <=", "__author__ = '<NAME>' from threading import RLock from bson.objectid import ObjectId from pymongo", "RLock() self.ds = ds_manager.ds_factory(logger) @thread_safe def get_one(self, key): \"\"\" method finds unit_of_work record", "method finds unit_of_work record and returns it to the caller\"\"\" query = {unit_of_work.PROCESS_NAME:", "key} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None: msg =", "if instance.db_id: document['_id'] = ObjectId(instance.db_id) instance.db_id = collection.save(document, safe=True) return instance.db_id @thread_safe def", "import DuplicateKeyError as MongoDuplicateKeyError from synergy.system import time_helper from synergy.system.time_qualifier import * from", "return [UnitOfWork.from_json(document) for document in cursor] def 
recover_from_duplicatekeyerror(self, e): \"\"\" method tries to", "{'_id': key} collection = self.ds.connection(COLLECTION_UNIT_OF_WORK) document = collection.find_one(query) if document is None: msg", "synergy.system.time_qualifier import * from synergy.system.decorator import thread_safe from synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from", "from synergy.db.error import DuplicateKeyError from synergy.db.model import unit_of_work from synergy.db.model.unit_of_work import UnitOfWork from", "collection.save(document, safe=True) return instance.db_id @thread_safe def insert(self, instance): \"\"\" inserts a unit of", "synergy.scheduler.scheduler_constants import COLLECTION_UNIT_OF_WORK, TYPE_MANAGED from synergy.conf import context from synergy.db.error import DuplicateKeyError from" ]
[ "= 2 return a + b # Boilerplate if __name__ == \"__main__\": import", "= 1 else: b = 2 return a + b # Boilerplate if", "b = 1 else: b = 2 return a + b # Boilerplate", "1 else: a = 2 if(1>2): b = 1 else: b = 2", "a + b # Boilerplate if __name__ == \"__main__\": import sys ret=main() sys.exit(ret)", "b = 2 return a + b # Boilerplate if __name__ == \"__main__\":", "= 2 if(1>2): b = 1 else: b = 2 return a +", "if(1>2): b = 1 else: b = 2 return a + b #", "a = 2 if(1>2): b = 1 else: b = 2 return a", "return a + b # Boilerplate if __name__ == \"__main__\": import sys ret=main()", "1 else: b = 2 return a + b # Boilerplate if __name__", "if(1<2): a = 1 else: a = 2 if(1>2): b = 1 else:", "2 return a + b # Boilerplate if __name__ == \"__main__\": import sys", "else: b = 2 return a + b # Boilerplate if __name__ ==", "main(): a=0 b=0 if(1<2): a = 1 else: a = 2 if(1>2): b", "def main(): a=0 b=0 if(1<2): a = 1 else: a = 2 if(1>2):", "2 if(1>2): b = 1 else: b = 2 return a + b", "a = 1 else: a = 2 if(1>2): b = 1 else: b", "= 1 else: a = 2 if(1>2): b = 1 else: b =", "else: a = 2 if(1>2): b = 1 else: b = 2 return", "b=0 if(1<2): a = 1 else: a = 2 if(1>2): b = 1", "a=0 b=0 if(1<2): a = 1 else: a = 2 if(1>2): b =" ]
[ "gt in gts]): lossesList, outputsList = self.trainOneSide( *process((inputL, inputR, gts)), returnOutputs=returnOutputs, kitti=kitti, weights=weights", "output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp ) if gt is not None else None) return", "self.outputMaxDisp // 2 self.getModel = RawPSMNetDown def loss(self, outputs, gts, kitti=False, outputMaxDisp=None): if", "in losses], dispOuts def train(self, batch, returnOutputs=False, kitti=False, weights=(1, 0), progress=0): myUtils.assertBatchLen(batch, 8)", "+ suffix + side] = process(output) return losses, outputs def test(self, batch, evalType='l1',", "dispScale, multiple): super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple) self.pool = nn.AvgPool2d((2, 2)) # input: RGB", "if outputMaxDisp is not None: raise Exception('Error: outputMaxDisp of PSMNetDown has no use!')", "forward(self, left, right): outDispHighs = super(RawPSMNetDown, self).forward(left, right) outDispLows = myUtils.forNestingList(outDispHighs, lambda disp:", "/ 2) return outDispHighs, outDispLows class PSMNetDown(PSMNet): # dataset: only used for suffix", "gt, outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp * 2, self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss( output, gt,", "= myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) / 2) return outDispHighs, outDispLows class PSMNetDown(PSMNet): #", "self.optimizer.zero_grad() outDispHighs, outDispLows = self.model.forward(imgL, imgR) losses = self.loss((outDispHighs, outDispLows), gts, kitti=kitti) loss", "8) self.trainPrepare() losses = myUtils.NameValues() outputs = collections.OrderedDict() imgL, imgR = batch.highResRGBs() for", ".. 
import SR import collections import torch.nn.parallel as P from .PSMNet import *", "2 self.getModel = RawPSMNetDown def loss(self, outputs, gts, kitti=False, outputMaxDisp=None): if outputMaxDisp is", "side in zip(rawOutputs, ('L', 'R')): if rawOutputsSide is not None: (outDispHigh, outDispLow) =", "outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp * 2, self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss( output, gt, kitti=kitti,", "imgR), (imgR, imgL), zip(batch.highResDisps(), batch.lowResDisps()), (lambda im: im, myUtils.flipLR), ('L', 'R') ): if", "of saveFolderName def __init__(self, maxdisp=192, dispScale=1, cuda=True, half=False, stage='unnamed', dataset=None, saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp,", "outputsList): outputs['outputDisp' + suffix + side] = process(output) return losses, outputs def test(self,", "dispOuts = [] if returnOutputs: with torch.no_grad(): dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2)) dispOuts.append(outDispLows[2].detach()", "super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti) for rawOutputsSide, side in zip(rawOutputs, ('L', 'R')): if", "for suffix, loss in zip(('', 'DispHigh', 'Disp'), lossesList): if loss is not None:", "zip(outputs, gts, (self.outputMaxDisp * 2, self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss( output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp )", "class RawPSMNetDown(RawPSMNetScale): def __init__(self, maxdisp, dispScale, multiple): super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple) self.pool =", ".RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng from ..Model import Model from .. 
import SR", "self).forward(left, right) outDispLows = myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) / 2) return outDispHighs, outDispLows", "zip(rawOutputs, ('L', 'R')): if rawOutputsSide is not None: (outDispHigh, outDispLow) = rawOutputsSide if", "import torch.optim as optim import torch import torch.nn.functional as F import torch.nn as", "torch.no_grad(): dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2)) dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp) losses = [loss] +", "value range 0~1 # outputs: disparity range 0~self.maxdisp * self.dispScale / 2 def", "outDispHighs, outDispLows class PSMNetDown(PSMNet): # dataset: only used for suffix of saveFolderName def", "cuda, half, stage, dataset, saveFolderSuffix) self.outputMaxDisp = self.outputMaxDisp // 2 self.getModel = RawPSMNetDown", "from ..Model import Model from .. import SR import collections import torch.nn.parallel as", "def forward(self, left, right): outDispHighs = super(RawPSMNetDown, self).forward(left, right) outDispLows = myUtils.forNestingList(outDispHighs, lambda", "zip(batch.highResDisps(), batch.lowResDisps()), (lambda im: im, myUtils.flipLR), ('L', 'R') ): if not all([gt is", "returnOutputs, kitti) for rawOutputsSide, side in zip(rawOutputs, ('L', 'R')): if rawOutputsSide is not", "= loss if returnOutputs: for suffix, output in zip(('High', 'Low'), outputsList): outputs['outputDisp' +", "myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half) scores, outputs, rawOutputs = super(PSMNetDown, self).test(batch, evalType, returnOutputs,", "as scaled_loss: scaled_loss.backward() self.optimizer.step() dispOuts = [] if returnOutputs: with torch.no_grad(): dispOuts.append(outDispHighs[2].detach() /", "zip( (imgL, imgR), (imgR, imgL), zip(batch.highResDisps(), batch.lowResDisps()), (lambda im: im, myUtils.flipLR), ('L', 'R')", "import SR import collections import torch.nn.parallel as P from .PSMNet import * class", 
"inputR, gts)), returnOutputs=returnOutputs, kitti=kitti, weights=weights ) for suffix, loss in zip(('', 'DispHigh', 'Disp'),", "returnOutputs=returnOutputs, kitti=kitti, weights=weights ) for suffix, loss in zip(('', 'DispHigh', 'Disp'), lossesList): if", "(lambda im: im, myUtils.flipLR), ('L', 'R') ): if not all([gt is None for", "has no use!') losses = [] for output, gt, outputMaxDisp in zip(outputs, gts,", "losses = [loss] + losses return [loss.data.item() for loss in losses], dispOuts def", "for rawOutputsSide, side in zip(rawOutputs, ('L', 'R')): if rawOutputsSide is not None: (outDispHigh,", "8) batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half) scores, outputs, rawOutputs = super(PSMNetDown,", "gts, (self.outputMaxDisp * 2, self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss( output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp ) if", "/ (self.outputMaxDisp * 2)) dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp) losses = [loss] + losses return", "= [loss] + losses return [loss.data.item() for loss in losses], dispOuts def train(self,", "/ 2 def forward(self, left, right): outDispHighs = super(RawPSMNetDown, self).forward(left, right) outDispLows =", "torch.nn as nn from evaluation import evalFcn from utils import myUtils from .RawPSMNet", "= [] for output, gt, outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp * 2, self.outputMaxDisp)):", "dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2)) dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp) losses = [loss] + losses", "PSMNetDown(PSMNet): # dataset: only used for suffix of saveFolderName def __init__(self, maxdisp=192, dispScale=1,", "as rawPSMNet from .RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng from ..Model import Model from", "self.pool = nn.AvgPool2d((2, 2)) # input: RGB value range 0~1 # outputs: disparity", "saveFolderName def __init__(self, maxdisp=192, dispScale=1, cuda=True, 
half=False, stage='unnamed', dataset=None, saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp, dispScale,", "+ losses return [loss.data.item() for loss in losses], dispOuts def train(self, batch, returnOutputs=False,", "__init__(self, maxdisp, dispScale, multiple): super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple) self.pool = nn.AvgPool2d((2, 2)) #", "with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() self.optimizer.step() dispOuts = [] if returnOutputs: with", "saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half, stage, dataset, saveFolderSuffix) self.outputMaxDisp = self.outputMaxDisp //", ".RawPSMNet import stackhourglass as rawPSMNet from .RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng from ..Model", "gts, returnOutputs=False, kitti=False, weights=(1, 0)): self.optimizer.zero_grad() outDispHighs, outDispLows = self.model.forward(imgL, imgR) losses =", "in zip(('', 'DispHigh', 'Disp'), lossesList): if loss is not None: losses['loss' + suffix", "loss in losses], dispOuts def train(self, batch, returnOutputs=False, kitti=False, weights=(1, 0), progress=0): myUtils.assertBatchLen(batch,", "myUtils.assertBatchLen(batch, 8) self.trainPrepare() losses = myUtils.NameValues() outputs = collections.OrderedDict() imgL, imgR = batch.highResRGBs()", "saveFolderSuffix) self.outputMaxDisp = self.outputMaxDisp // 2 self.getModel = RawPSMNetDown def loss(self, outputs, gts,", "losses, outputs def test(self, batch, evalType='l1', returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch, 8) batch = myUtils.Batch(batch.highResRGBs()", "im: im, myUtils.flipLR), ('L', 'R') ): if not all([gt is None for gt", "right): outDispHighs = super(RawPSMNetDown, self).forward(left, right) outDispLows = myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) /", "self).test(batch, evalType, returnOutputs, kitti) for rawOutputsSide, side in zip(rawOutputs, 
('L', 'R')): if rawOutputsSide", "('L', 'R') ): if not all([gt is None for gt in gts]): lossesList,", "im, myUtils.flipLR), ('L', 'R') ): if not all([gt is None for gt in", "= myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half) scores, outputs, rawOutputs = super(PSMNetDown, self).test(batch, evalType,", "self.dispScale / 2 def forward(self, left, right): outDispHighs = super(RawPSMNetDown, self).forward(left, right) outDispLows", "loss(self, outputs, gts, kitti=False, outputMaxDisp=None): if outputMaxDisp is not None: raise Exception('Error: outputMaxDisp", "* loss for weight, loss in zip(weights, losses) if loss is not None])", "dispScale=1, cuda=True, half=False, stage='unnamed', dataset=None, saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half, stage, dataset,", "[] if returnOutputs: with torch.no_grad(): dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2)) dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp)", "PSMNetDown has no use!') losses = [] for output, gt, outputMaxDisp in zip(outputs,", "if returnOutputs: with torch.no_grad(): dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2)) dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp) losses", "* 2, self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss( output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp ) if gt is", "in zip(('High', 'Low'), outputsList): outputs['outputDisp' + suffix + side] = process(output) return losses,", "myUtils.flipLR), ('L', 'R') ): if not all([gt is None for gt in gts]):", "outputs, gts, kitti=False, outputMaxDisp=None): if outputMaxDisp is not None: raise Exception('Error: outputMaxDisp of", "outputMaxDisp=outputMaxDisp ) if gt is not None else None) return losses def trainOneSide(self,", "gts]): lossesList, outputsList = self.trainOneSide( *process((inputL, inputR, gts)), returnOutputs=returnOutputs, kitti=kitti, weights=weights ) for", 
"super(RawPSMNetDown, self).forward(left, right) outDispLows = myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) / 2) return outDispHighs,", "loss is not None: losses['loss' + suffix + side] = loss if returnOutputs:", "evalFcn from utils import myUtils from .RawPSMNet import stackhourglass as rawPSMNet from .RawPSMNet_TieCheng", "__init__(self, maxdisp=192, dispScale=1, cuda=True, half=False, stage='unnamed', dataset=None, saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half,", "losses = self.loss((outDispHighs, outDispLows), gts, kitti=kitti) loss = sum([weight * loss for weight,", "(imgL, imgR), (imgR, imgL), zip(batch.highResDisps(), batch.lowResDisps()), (lambda im: im, myUtils.flipLR), ('L', 'R') ):", "= nn.AvgPool2d((2, 2)) # input: RGB value range 0~1 # outputs: disparity range", "returnOutputs: for suffix, output in zip(('High', 'Low'), outputsList): outputs['outputDisp' + suffix + side]", "returnOutputs=False, kitti=False, weights=(1, 0), progress=0): myUtils.assertBatchLen(batch, 8) self.trainPrepare() losses = myUtils.NameValues() outputs =", "torch.nn.functional as F import torch.nn as nn from evaluation import evalFcn from utils", "use!') losses = [] for output, gt, outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp *", "process, side in zip( (imgL, imgR), (imgR, imgL), zip(batch.highResDisps(), batch.lowResDisps()), (lambda im: im,", "rawPSMNet_TieCheng from ..Model import Model from .. 
import SR import collections import torch.nn.parallel", "outputMaxDisp=None): if outputMaxDisp is not None: raise Exception('Error: outputMaxDisp of PSMNetDown has no", "else None) return losses def trainOneSide(self, imgL, imgR, gts, returnOutputs=False, kitti=False, weights=(1, 0)):", "cuda=True, half=False, stage='unnamed', dataset=None, saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half, stage, dataset, saveFolderSuffix)", "self.optimizer) as scaled_loss: scaled_loss.backward() self.optimizer.step() dispOuts = [] if returnOutputs: with torch.no_grad(): dispOuts.append(outDispHighs[2].detach()", "'DispHigh', 'Disp'), lossesList): if loss is not None: losses['loss' + suffix + side]", "kitti=kitti) loss = sum([weight * loss for weight, loss in zip(weights, losses) if", "return losses, outputs def test(self, batch, evalType='l1', returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch, 8) batch =", "zip(weights, losses) if loss is not None]) with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward()", "range 0~1 # outputs: disparity range 0~self.maxdisp * self.dispScale / 2 def forward(self,", "myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) / 2) return outDispHighs, outDispLows class PSMNetDown(PSMNet): # dataset:", "= self.loss((outDispHighs, outDispLows), gts, kitti=kitti) loss = sum([weight * loss for weight, loss", "not None: (outDispHigh, outDispLow) = rawOutputsSide if returnOutputs: if outDispHigh is not None:", "import torch.nn.parallel as P from .PSMNet import * class RawPSMNetDown(RawPSMNetScale): def __init__(self, maxdisp,", "only used for suffix of saveFolderName def __init__(self, maxdisp=192, dispScale=1, cuda=True, half=False, stage='unnamed',", "+ side] = process(output) return losses, outputs def test(self, batch, evalType='l1', returnOutputs=False, kitti=False):", "torch.optim as optim import torch import torch.nn.functional as F import torch.nn as 
nn", "weight, loss in zip(weights, losses) if loss is not None]) with self.amp_handle.scale_loss(loss, self.optimizer)", "batch, returnOutputs=False, kitti=False, weights=(1, 0), progress=0): myUtils.assertBatchLen(batch, 8) self.trainPrepare() losses = myUtils.NameValues() outputs", "not None else None) return losses def trainOneSide(self, imgL, imgR, gts, returnOutputs=False, kitti=False,", "with torch.no_grad(): dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2)) dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp) losses = [loss]", "maxdisp, dispScale, multiple): super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple) self.pool = nn.AvgPool2d((2, 2)) # input:", "stage, dataset, saveFolderSuffix) self.outputMaxDisp = self.outputMaxDisp // 2 self.getModel = RawPSMNetDown def loss(self,", "= super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti) for rawOutputsSide, side in zip(rawOutputs, ('L', 'R')):", "not None: raise Exception('Error: outputMaxDisp of PSMNetDown has no use!') losses = []", "dataset: only used for suffix of saveFolderName def __init__(self, maxdisp=192, dispScale=1, cuda=True, half=False,", "nn from evaluation import evalFcn from utils import myUtils from .RawPSMNet import stackhourglass", "torch import torch.nn.functional as F import torch.nn as nn from evaluation import evalFcn", "None: raise Exception('Error: outputMaxDisp of PSMNetDown has no use!') losses = [] for", "disp: self.pool(disp) / 2) return outDispHighs, outDispLows class PSMNetDown(PSMNet): # dataset: only used", "as F import torch.nn as nn from evaluation import evalFcn from utils import", "as nn from evaluation import evalFcn from utils import myUtils from .RawPSMNet import", "from evaluation import evalFcn from utils import myUtils from .RawPSMNet import stackhourglass as", "dataset=None, saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half, stage, dataset, saveFolderSuffix) self.outputMaxDisp = 
self.outputMaxDisp", "def train(self, batch, returnOutputs=False, kitti=False, weights=(1, 0), progress=0): myUtils.assertBatchLen(batch, 8) self.trainPrepare() losses =", "dispScale, multiple) self.pool = nn.AvgPool2d((2, 2)) # input: RGB value range 0~1 #", "right) outDispLows = myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) / 2) return outDispHighs, outDispLows class", "returnOutputs=False, kitti=False, weights=(1, 0)): self.optimizer.zero_grad() outDispHighs, outDispLows = self.model.forward(imgL, imgR) losses = self.loss((outDispHighs,", "losses return [loss.data.item() for loss in losses], dispOuts def train(self, batch, returnOutputs=False, kitti=False,", "kitti=kitti, weights=weights ) for suffix, loss in zip(('', 'DispHigh', 'Disp'), lossesList): if loss", "imgL, imgR, gts, returnOutputs=False, kitti=False, weights=(1, 0)): self.optimizer.zero_grad() outDispHighs, outDispLows = self.model.forward(imgL, imgR)", ".PSMNet import * class RawPSMNetDown(RawPSMNetScale): def __init__(self, maxdisp, dispScale, multiple): super(RawPSMNetDown, self).__init__(maxdisp, dispScale,", "2 def forward(self, left, right): outDispHighs = super(RawPSMNetDown, self).forward(left, right) outDispLows = myUtils.forNestingList(outDispHighs,", "imgR) losses = self.loss((outDispHighs, outDispLows), gts, kitti=kitti) loss = sum([weight * loss for", ") for suffix, loss in zip(('', 'DispHigh', 'Disp'), lossesList): if loss is not", "outDispHigh is not None: outputs['outputDispHigh' + side] = outDispHigh / (self.outputMaxDisp * 2)", "= myUtils.NameValues() outputs = collections.OrderedDict() imgL, imgR = batch.highResRGBs() for inputL, inputR, gts,", "if not all([gt is None for gt in gts]): lossesList, outputsList = self.trainOneSide(", "outDispHighs = super(RawPSMNetDown, self).forward(left, right) outDispLows = myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) / 2)", "self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss: 
scaled_loss.backward() self.optimizer.step() dispOuts = [] if returnOutputs: with torch.no_grad():", "= RawPSMNetDown def loss(self, outputs, gts, kitti=False, outputMaxDisp=None): if outputMaxDisp is not None:", "# dataset: only used for suffix of saveFolderName def __init__(self, maxdisp=192, dispScale=1, cuda=True,", "is not None: losses['loss' + suffix + side] = loss if returnOutputs: for", "myUtils.NameValues() outputs = collections.OrderedDict() imgL, imgR = batch.highResRGBs() for inputL, inputR, gts, process,", "dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp) losses = [loss] + losses return [loss.data.item() for loss in", "gts, kitti=False, outputMaxDisp=None): if outputMaxDisp is not None: raise Exception('Error: outputMaxDisp of PSMNetDown", "= [] if returnOutputs: with torch.no_grad(): dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2)) dispOuts.append(outDispLows[2].detach() /", "2, self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss( output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp ) if gt is not", "from .PSMNet import * class RawPSMNetDown(RawPSMNetScale): def __init__(self, maxdisp, dispScale, multiple): super(RawPSMNetDown, self).__init__(maxdisp,", "for suffix, output in zip(('High', 'Low'), outputsList): outputs['outputDisp' + suffix + side] =", "[] for output, gt, outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp * 2, self.outputMaxDisp)): losses.append(super(PSMNetDown,", "rawOutputsSide, side in zip(rawOutputs, ('L', 'R')): if rawOutputsSide is not None: (outDispHigh, outDispLow)", "returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch, 8) batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half) scores, outputs,", "batch, evalType='l1', returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch, 8) batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half)", "('L', 'R')): if rawOutputsSide 
is not None: (outDispHigh, outDispLow) = rawOutputsSide if returnOutputs:", "self.outputMaxDisp = self.outputMaxDisp // 2 self.getModel = RawPSMNetDown def loss(self, outputs, gts, kitti=False,", "def loss(self, outputs, gts, kitti=False, outputMaxDisp=None): if outputMaxDisp is not None: raise Exception('Error:", "'Low'), outputsList): outputs['outputDisp' + suffix + side] = process(output) return losses, outputs def", "outputMaxDisp of PSMNetDown has no use!') losses = [] for output, gt, outputMaxDisp", "2)) dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp) losses = [loss] + losses return [loss.data.item() for loss", "loss is not None]) with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() self.optimizer.step() dispOuts =", "suffix + side] = process(output) return losses, outputs def test(self, batch, evalType='l1', returnOutputs=False,", "myUtils.assertBatchLen(batch, 8) batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half) scores, outputs, rawOutputs =", "def test(self, batch, evalType='l1', returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch, 8) batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(),", "outDispLows = self.model.forward(imgL, imgR) losses = self.loss((outDispHighs, outDispLows), gts, kitti=kitti) loss = sum([weight", "RawPSMNetDown def loss(self, outputs, gts, kitti=False, outputMaxDisp=None): if outputMaxDisp is not None: raise", "loss if returnOutputs: for suffix, output in zip(('High', 'Low'), outputsList): outputs['outputDisp' + suffix", "self.trainPrepare() losses = myUtils.NameValues() outputs = collections.OrderedDict() imgL, imgR = batch.highResRGBs() for inputL,", "not None: outputs['outputDispHigh' + side] = outDispHigh / (self.outputMaxDisp * 2) return scores,", "(self.outputMaxDisp * 2)) dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp) losses = [loss] + losses return [loss.data.item()", "as 
rawPSMNet_TieCheng from ..Model import Model from .. import SR import collections import", "half=False, stage='unnamed', dataset=None, saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half, stage, dataset, saveFolderSuffix) self.outputMaxDisp", "train(self, batch, returnOutputs=False, kitti=False, weights=(1, 0), progress=0): myUtils.assertBatchLen(batch, 8) self.trainPrepare() losses = myUtils.NameValues()", "gt is not None else None) return losses def trainOneSide(self, imgL, imgR, gts,", "is not None: raise Exception('Error: outputMaxDisp of PSMNetDown has no use!') losses =", "None for gt in gts]): lossesList, outputsList = self.trainOneSide( *process((inputL, inputR, gts)), returnOutputs=returnOutputs,", "for loss in losses], dispOuts def train(self, batch, returnOutputs=False, kitti=False, weights=(1, 0), progress=0):", "import evalFcn from utils import myUtils from .RawPSMNet import stackhourglass as rawPSMNet from", "+ batch.lowestResDisps(), cuda=batch.cuda, half=batch.half) scores, outputs, rawOutputs = super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti)", "returnOutputs: if outDispHigh is not None: outputs['outputDispHigh' + side] = outDispHigh / (self.outputMaxDisp", "'R') ): if not all([gt is None for gt in gts]): lossesList, outputsList", "import stackhourglass as rawPSMNet_TieCheng from ..Model import Model from .. import SR import", "outputs['outputDispHigh' + side] = outDispHigh / (self.outputMaxDisp * 2) return scores, outputs, rawOutputs", "not None: losses['loss' + suffix + side] = loss if returnOutputs: for suffix,", "left, right): outDispHighs = super(RawPSMNetDown, self).forward(left, right) outDispLows = myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp)", "stackhourglass as rawPSMNet_TieCheng from ..Model import Model from .. 
import SR import collections", "kitti=False, weights=(1, 0)): self.optimizer.zero_grad() outDispHighs, outDispLows = self.model.forward(imgL, imgR) losses = self.loss((outDispHighs, outDispLows),", "import Model from .. import SR import collections import torch.nn.parallel as P from", "self.outputMaxDisp) losses = [loss] + losses return [loss.data.item() for loss in losses], dispOuts", "* self.dispScale / 2 def forward(self, left, right): outDispHighs = super(RawPSMNetDown, self).forward(left, right)", "0~self.maxdisp * self.dispScale / 2 def forward(self, left, right): outDispHighs = super(RawPSMNetDown, self).forward(left,", "outputsList = self.trainOneSide( *process((inputL, inputR, gts)), returnOutputs=returnOutputs, kitti=kitti, weights=weights ) for suffix, loss", "outDispLows), gts, kitti=kitti) loss = sum([weight * loss for weight, loss in zip(weights,", "if loss is not None: losses['loss' + suffix + side] = loss if", "..Model import Model from .. import SR import collections import torch.nn.parallel as P", "outputMaxDisp is not None: raise Exception('Error: outputMaxDisp of PSMNetDown has no use!') losses", "is not None: (outDispHigh, outDispLow) = rawOutputsSide if returnOutputs: if outDispHigh is not", "torch.nn.parallel as P from .PSMNet import * class RawPSMNetDown(RawPSMNetScale): def __init__(self, maxdisp, dispScale,", "disparity range 0~self.maxdisp * self.dispScale / 2 def forward(self, left, right): outDispHighs =", "suffix, output in zip(('High', 'Low'), outputsList): outputs['outputDisp' + suffix + side] = process(output)", "in zip( (imgL, imgR), (imgR, imgL), zip(batch.highResDisps(), batch.lowResDisps()), (lambda im: im, myUtils.flipLR), ('L',", "self).__init__(maxdisp, dispScale, cuda, half, stage, dataset, saveFolderSuffix) self.outputMaxDisp = self.outputMaxDisp // 2 self.getModel", "self.trainOneSide( *process((inputL, inputR, gts)), returnOutputs=returnOutputs, kitti=kitti, weights=weights ) for suffix, loss in zip(('',", "zip(('', 
'DispHigh', 'Disp'), lossesList): if loss is not None: losses['loss' + suffix +", "maxdisp=192, dispScale=1, cuda=True, half=False, stage='unnamed', dataset=None, saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half, stage,", "= self.trainOneSide( *process((inputL, inputR, gts)), returnOutputs=returnOutputs, kitti=kitti, weights=weights ) for suffix, loss in", "= batch.highResRGBs() for inputL, inputR, gts, process, side in zip( (imgL, imgR), (imgR,", "of PSMNetDown has no use!') losses = [] for output, gt, outputMaxDisp in", "None) return losses def trainOneSide(self, imgL, imgR, gts, returnOutputs=False, kitti=False, weights=(1, 0)): self.optimizer.zero_grad()", "super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half, stage, dataset, saveFolderSuffix) self.outputMaxDisp = self.outputMaxDisp // 2", "loss for weight, loss in zip(weights, losses) if loss is not None]) with", "weights=weights ) for suffix, loss in zip(('', 'DispHigh', 'Disp'), lossesList): if loss is", "for suffix of saveFolderName def __init__(self, maxdisp=192, dispScale=1, cuda=True, half=False, stage='unnamed', dataset=None, saveFolderSuffix=''):", "outDispHighs, outDispLows = self.model.forward(imgL, imgR) losses = self.loss((outDispHighs, outDispLows), gts, kitti=kitti) loss =", "import torch import torch.nn.functional as F import torch.nn as nn from evaluation import", "as P from .PSMNet import * class RawPSMNetDown(RawPSMNetScale): def __init__(self, maxdisp, dispScale, multiple):", "suffix, loss in zip(('', 'DispHigh', 'Disp'), lossesList): if loss is not None: losses['loss'", "from .. 
import SR import collections import torch.nn.parallel as P from .PSMNet import", "self.model.forward(imgL, imgR) losses = self.loss((outDispHighs, outDispLows), gts, kitti=kitti) loss = sum([weight * loss", "[loss] + losses return [loss.data.item() for loss in losses], dispOuts def train(self, batch,", "(outDispHigh, outDispLow) = rawOutputsSide if returnOutputs: if outDispHigh is not None: outputs['outputDispHigh' +", "for gt in gts]): lossesList, outputsList = self.trainOneSide( *process((inputL, inputR, gts)), returnOutputs=returnOutputs, kitti=kitti,", "dispOuts def train(self, batch, returnOutputs=False, kitti=False, weights=(1, 0), progress=0): myUtils.assertBatchLen(batch, 8) self.trainPrepare() losses", "as optim import torch import torch.nn.functional as F import torch.nn as nn from", "2)) # input: RGB value range 0~1 # outputs: disparity range 0~self.maxdisp *", "* 2)) dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp) losses = [loss] + losses return [loss.data.item() for", "in gts]): lossesList, outputsList = self.trainOneSide( *process((inputL, inputR, gts)), returnOutputs=returnOutputs, kitti=kitti, weights=weights )", "= process(output) return losses, outputs def test(self, batch, evalType='l1', returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch, 8)", "optim import torch import torch.nn.functional as F import torch.nn as nn from evaluation", "not None]) with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() self.optimizer.step() dispOuts = [] if", "lambda disp: self.pool(disp) / 2) return outDispHighs, outDispLows class PSMNetDown(PSMNet): # dataset: only", "imgL, imgR = batch.highResRGBs() for inputL, inputR, gts, process, side in zip( (imgL,", "rawPSMNet from .RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng from ..Model import Model from ..", "kitti=kitti, outputMaxDisp=outputMaxDisp ) if gt is not None else None) return losses def", ") if gt is not None else None) return 
losses def trainOneSide(self, imgL,", "outDispLows = myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) / 2) return outDispHighs, outDispLows class PSMNetDown(PSMNet):", "Model from .. import SR import collections import torch.nn.parallel as P from .PSMNet", "(self.outputMaxDisp * 2, self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss( output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp ) if gt", "losses) if loss is not None]) with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() self.optimizer.step()", "loss = sum([weight * loss for weight, loss in zip(weights, losses) if loss", "losses = [] for output, gt, outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp * 2,", "'R')): if rawOutputsSide is not None: (outDispHigh, outDispLow) = rawOutputsSide if returnOutputs: if", "* class RawPSMNetDown(RawPSMNetScale): def __init__(self, maxdisp, dispScale, multiple): super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple) self.pool", "imgL), zip(batch.highResDisps(), batch.lowResDisps()), (lambda im: im, myUtils.flipLR), ('L', 'R') ): if not all([gt", "self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss( output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp ) if gt is not None", "batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half) scores, outputs, rawOutputs = super(PSMNetDown, self).test(batch,", "from .RawPSMNet import stackhourglass as rawPSMNet from .RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng from", "batch.highResRGBs() for inputL, inputR, gts, process, side in zip( (imgL, imgR), (imgR, imgL),", "is not None: outputs['outputDispHigh' + side] = outDispHigh / (self.outputMaxDisp * 2) return", "half, stage, dataset, saveFolderSuffix) self.outputMaxDisp = self.outputMaxDisp // 2 self.getModel = RawPSMNetDown def", "import torch.nn as nn from evaluation import evalFcn from utils import myUtils from", 
"self).__init__(maxdisp, dispScale, multiple) self.pool = nn.AvgPool2d((2, 2)) # input: RGB value range 0~1", "os import time import torch.optim as optim import torch import torch.nn.functional as F", "Exception('Error: outputMaxDisp of PSMNetDown has no use!') losses = [] for output, gt,", "progress=0): myUtils.assertBatchLen(batch, 8) self.trainPrepare() losses = myUtils.NameValues() outputs = collections.OrderedDict() imgL, imgR =", "losses['loss' + suffix + side] = loss if returnOutputs: for suffix, output in", "is not None else None) return losses def trainOneSide(self, imgL, imgR, gts, returnOutputs=False,", "collections.OrderedDict() imgL, imgR = batch.highResRGBs() for inputL, inputR, gts, process, side in zip(", "import stackhourglass as rawPSMNet from .RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng from ..Model import", "losses.append(super(PSMNetDown, self).loss( output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp ) if gt is not None else", "None: outputs['outputDispHigh' + side] = outDispHigh / (self.outputMaxDisp * 2) return scores, outputs,", "side] = loss if returnOutputs: for suffix, output in zip(('High', 'Low'), outputsList): outputs['outputDisp'", "None: losses['loss' + suffix + side] = loss if returnOutputs: for suffix, output", "trainOneSide(self, imgL, imgR, gts, returnOutputs=False, kitti=False, weights=(1, 0)): self.optimizer.zero_grad() outDispHighs, outDispLows = self.model.forward(imgL,", "evaluation import evalFcn from utils import myUtils from .RawPSMNet import stackhourglass as rawPSMNet", "outputs, rawOutputs = super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti) for rawOutputsSide, side in zip(rawOutputs,", "utils import myUtils from .RawPSMNet import stackhourglass as rawPSMNet from .RawPSMNet_TieCheng import stackhourglass", "nn.AvgPool2d((2, 2)) # input: RGB value range 0~1 # outputs: disparity range 0~self.maxdisp", "evalType='l1', returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch, 8) 
batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half) scores,", "rawOutputsSide if returnOutputs: if outDispHigh is not None: outputs['outputDispHigh' + side] = outDispHigh", "import time import torch.optim as optim import torch import torch.nn.functional as F import", "range 0~self.maxdisp * self.dispScale / 2 def forward(self, left, right): outDispHighs = super(RawPSMNetDown,", "2) return outDispHighs, outDispLows class PSMNetDown(PSMNet): # dataset: only used for suffix of", "# input: RGB value range 0~1 # outputs: disparity range 0~self.maxdisp * self.dispScale", "/ self.outputMaxDisp) losses = [loss] + losses return [loss.data.item() for loss in losses],", "lossesList): if loss is not None: losses['loss' + suffix + side] = loss", "loss in zip(('', 'DispHigh', 'Disp'), lossesList): if loss is not None: losses['loss' +", "def __init__(self, maxdisp, dispScale, multiple): super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple) self.pool = nn.AvgPool2d((2, 2))", "input: RGB value range 0~1 # outputs: disparity range 0~self.maxdisp * self.dispScale /", "outputs['outputDisp' + suffix + side] = process(output) return losses, outputs def test(self, batch,", "is None for gt in gts]): lossesList, outputsList = self.trainOneSide( *process((inputL, inputR, gts)),", "imgR = batch.highResRGBs() for inputL, inputR, gts, process, side in zip( (imgL, imgR),", "not all([gt is None for gt in gts]): lossesList, outputsList = self.trainOneSide( *process((inputL,", "used for suffix of saveFolderName def __init__(self, maxdisp=192, dispScale=1, cuda=True, half=False, stage='unnamed', dataset=None,", "super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple) self.pool = nn.AvgPool2d((2, 2)) # input: RGB value range", "self).loss( output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp ) if gt is not None else None)", "for inputL, inputR, gts, process, side in zip( (imgL, imgR), (imgR, imgL), zip(batch.highResDisps(),", 
"gt, kitti=kitti, outputMaxDisp=outputMaxDisp ) if gt is not None else None) return losses", "output, gt, outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp * 2, self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss( output,", "None else None) return losses def trainOneSide(self, imgL, imgR, gts, returnOutputs=False, kitti=False, weights=(1,", "(imgR, imgL), zip(batch.highResDisps(), batch.lowResDisps()), (lambda im: im, myUtils.flipLR), ('L', 'R') ): if not", "half=batch.half) scores, outputs, rawOutputs = super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti) for rawOutputsSide, side", "output in zip(('High', 'Low'), outputsList): outputs['outputDisp' + suffix + side] = process(output) return", "def __init__(self, maxdisp=192, dispScale=1, cuda=True, half=False, stage='unnamed', dataset=None, saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda,", "SR import collections import torch.nn.parallel as P from .PSMNet import * class RawPSMNetDown(RawPSMNetScale):", "0~1 # outputs: disparity range 0~self.maxdisp * self.dispScale / 2 def forward(self, left,", "side] = process(output) return losses, outputs def test(self, batch, evalType='l1', returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch,", "None]) with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() self.optimizer.step() dispOuts = [] if returnOutputs:", "if outDispHigh is not None: outputs['outputDispHigh' + side] = outDispHigh / (self.outputMaxDisp *", "weights=(1, 0)): self.optimizer.zero_grad() outDispHighs, outDispLows = self.model.forward(imgL, imgR) losses = self.loss((outDispHighs, outDispLows), gts,", "P from .PSMNet import * class RawPSMNetDown(RawPSMNetScale): def __init__(self, maxdisp, dispScale, multiple): super(RawPSMNetDown,", "# outputs: disparity range 0~self.maxdisp * self.dispScale / 2 def forward(self, left, right):", "+ side] = loss if returnOutputs: for suffix, output in zip(('High', 
'Low'), outputsList):", "RGB value range 0~1 # outputs: disparity range 0~self.maxdisp * self.dispScale / 2", "suffix of saveFolderName def __init__(self, maxdisp=192, dispScale=1, cuda=True, half=False, stage='unnamed', dataset=None, saveFolderSuffix=''): super(PSMNetDown,", "time import torch.optim as optim import torch import torch.nn.functional as F import torch.nn", "loss in zip(weights, losses) if loss is not None]) with self.amp_handle.scale_loss(loss, self.optimizer) as", "side in zip( (imgL, imgR), (imgR, imgL), zip(batch.highResDisps(), batch.lowResDisps()), (lambda im: im, myUtils.flipLR),", "for output, gt, outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp * 2, self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss(", "+ suffix + side] = loss if returnOutputs: for suffix, output in zip(('High',", "zip(('High', 'Low'), outputsList): outputs['outputDisp' + suffix + side] = process(output) return losses, outputs", "= self.outputMaxDisp // 2 self.getModel = RawPSMNetDown def loss(self, outputs, gts, kitti=False, outputMaxDisp=None):", "inputR, gts, process, side in zip( (imgL, imgR), (imgR, imgL), zip(batch.highResDisps(), batch.lowResDisps()), (lambda", "raise Exception('Error: outputMaxDisp of PSMNetDown has no use!') losses = [] for output,", "def trainOneSide(self, imgL, imgR, gts, returnOutputs=False, kitti=False, weights=(1, 0)): self.optimizer.zero_grad() outDispHighs, outDispLows =", "dataset, saveFolderSuffix) self.outputMaxDisp = self.outputMaxDisp // 2 self.getModel = RawPSMNetDown def loss(self, outputs,", "cuda=batch.cuda, half=batch.half) scores, outputs, rawOutputs = super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti) for rawOutputsSide,", "in zip(outputs, gts, (self.outputMaxDisp * 2, self.outputMaxDisp)): losses.append(super(PSMNetDown, self).loss( output, gt, kitti=kitti, outputMaxDisp=outputMaxDisp", "import torch.nn.functional as F import torch.nn as nn from evaluation import evalFcn from", "self.pool(disp) 
/ 2) return outDispHighs, outDispLows class PSMNetDown(PSMNet): # dataset: only used for", "scaled_loss: scaled_loss.backward() self.optimizer.step() dispOuts = [] if returnOutputs: with torch.no_grad(): dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp", "outputs = collections.OrderedDict() imgL, imgR = batch.highResRGBs() for inputL, inputR, gts, process, side", "self.getModel = RawPSMNetDown def loss(self, outputs, gts, kitti=False, outputMaxDisp=None): if outputMaxDisp is not", "losses], dispOuts def train(self, batch, returnOutputs=False, kitti=False, weights=(1, 0), progress=0): myUtils.assertBatchLen(batch, 8) self.trainPrepare()", "is not None]) with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() self.optimizer.step() dispOuts = []", "rawOutputsSide is not None: (outDispHigh, outDispLow) = rawOutputsSide if returnOutputs: if outDispHigh is", "stackhourglass as rawPSMNet from .RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng from ..Model import Model", "batch.lowestResDisps(), cuda=batch.cuda, half=batch.half) scores, outputs, rawOutputs = super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti) for", "import collections import torch.nn.parallel as P from .PSMNet import * class RawPSMNetDown(RawPSMNetScale): def", "outDispLow) = rawOutputsSide if returnOutputs: if outDispHigh is not None: outputs['outputDispHigh' + side]", "0)): self.optimizer.zero_grad() outDispHighs, outDispLows = self.model.forward(imgL, imgR) losses = self.loss((outDispHighs, outDispLows), gts, kitti=kitti)", "= self.model.forward(imgL, imgR) losses = self.loss((outDispHighs, outDispLows), gts, kitti=kitti) loss = sum([weight *", "kitti=False): myUtils.assertBatchLen(batch, 8) batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda, half=batch.half) scores, outputs, rawOutputs", "no use!') losses = [] for output, gt, outputMaxDisp in zip(outputs, gts, (self.outputMaxDisp", 
"kitti=False, outputMaxDisp=None): if outputMaxDisp is not None: raise Exception('Error: outputMaxDisp of PSMNetDown has", "= super(RawPSMNetDown, self).forward(left, right) outDispLows = myUtils.forNestingList(outDispHighs, lambda disp: self.pool(disp) / 2) return", "if rawOutputsSide is not None: (outDispHigh, outDispLow) = rawOutputsSide if returnOutputs: if outDispHigh", "dispScale, cuda, half, stage, dataset, saveFolderSuffix) self.outputMaxDisp = self.outputMaxDisp // 2 self.getModel =", "if loss is not None]) with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() self.optimizer.step() dispOuts", "scores, outputs, rawOutputs = super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti) for rawOutputsSide, side in", "sum([weight * loss for weight, loss in zip(weights, losses) if loss is not", "): if not all([gt is None for gt in gts]): lossesList, outputsList =", "None: (outDispHigh, outDispLow) = rawOutputsSide if returnOutputs: if outDispHigh is not None: outputs['outputDispHigh'", "suffix + side] = loss if returnOutputs: for suffix, output in zip(('High', 'Low'),", "gts)), returnOutputs=returnOutputs, kitti=kitti, weights=weights ) for suffix, loss in zip(('', 'DispHigh', 'Disp'), lossesList):", "[loss.data.item() for loss in losses], dispOuts def train(self, batch, returnOutputs=False, kitti=False, weights=(1, 0),", "= sum([weight * loss for weight, loss in zip(weights, losses) if loss is", "outDispLows class PSMNetDown(PSMNet): # dataset: only used for suffix of saveFolderName def __init__(self,", "stage='unnamed', dataset=None, saveFolderSuffix=''): super(PSMNetDown, self).__init__(maxdisp, dispScale, cuda, half, stage, dataset, saveFolderSuffix) self.outputMaxDisp =", "evalType, returnOutputs, kitti) for rawOutputsSide, side in zip(rawOutputs, ('L', 'R')): if rawOutputsSide is", "from utils import myUtils from .RawPSMNet import stackhourglass as rawPSMNet from .RawPSMNet_TieCheng import", "returnOutputs: 
with torch.no_grad(): dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2)) dispOuts.append(outDispLows[2].detach() / self.outputMaxDisp) losses =", "outputs: disparity range 0~self.maxdisp * self.dispScale / 2 def forward(self, left, right): outDispHighs", "import * class RawPSMNetDown(RawPSMNetScale): def __init__(self, maxdisp, dispScale, multiple): super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple)", "for weight, loss in zip(weights, losses) if loss is not None]) with self.amp_handle.scale_loss(loss,", "all([gt is None for gt in gts]): lossesList, outputsList = self.trainOneSide( *process((inputL, inputR,", "return outDispHighs, outDispLows class PSMNetDown(PSMNet): # dataset: only used for suffix of saveFolderName", "self.loss((outDispHighs, outDispLows), gts, kitti=kitti) loss = sum([weight * loss for weight, loss in", "losses = myUtils.NameValues() outputs = collections.OrderedDict() imgL, imgR = batch.highResRGBs() for inputL, inputR,", "'Disp'), lossesList): if loss is not None: losses['loss' + suffix + side] =", "kitti) for rawOutputsSide, side in zip(rawOutputs, ('L', 'R')): if rawOutputsSide is not None:", "class PSMNetDown(PSMNet): # dataset: only used for suffix of saveFolderName def __init__(self, maxdisp=192,", "0), progress=0): myUtils.assertBatchLen(batch, 8) self.trainPrepare() losses = myUtils.NameValues() outputs = collections.OrderedDict() imgL, imgR", "collections import torch.nn.parallel as P from .PSMNet import * class RawPSMNetDown(RawPSMNetScale): def __init__(self,", "*process((inputL, inputR, gts)), returnOutputs=returnOutputs, kitti=kitti, weights=weights ) for suffix, loss in zip(('', 'DispHigh',", "inputL, inputR, gts, process, side in zip( (imgL, imgR), (imgR, imgL), zip(batch.highResDisps(), batch.lowResDisps()),", "weights=(1, 0), progress=0): myUtils.assertBatchLen(batch, 8) self.trainPrepare() losses = myUtils.NameValues() outputs = collections.OrderedDict() imgL,", "in zip(weights, losses) if loss 
is not None]) with self.amp_handle.scale_loss(loss, self.optimizer) as scaled_loss:", "kitti=False, weights=(1, 0), progress=0): myUtils.assertBatchLen(batch, 8) self.trainPrepare() losses = myUtils.NameValues() outputs = collections.OrderedDict()", "process(output) return losses, outputs def test(self, batch, evalType='l1', returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch, 8) batch", "return losses def trainOneSide(self, imgL, imgR, gts, returnOutputs=False, kitti=False, weights=(1, 0)): self.optimizer.zero_grad() outDispHighs,", "batch.lowResDisps()), (lambda im: im, myUtils.flipLR), ('L', 'R') ): if not all([gt is None", "losses def trainOneSide(self, imgL, imgR, gts, returnOutputs=False, kitti=False, weights=(1, 0)): self.optimizer.zero_grad() outDispHighs, outDispLows", "= rawOutputsSide if returnOutputs: if outDispHigh is not None: outputs['outputDispHigh' + side] =", "multiple) self.pool = nn.AvgPool2d((2, 2)) # input: RGB value range 0~1 # outputs:", "RawPSMNetDown(RawPSMNetScale): def __init__(self, maxdisp, dispScale, multiple): super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple) self.pool = nn.AvgPool2d((2,", "rawOutputs = super(PSMNetDown, self).test(batch, evalType, returnOutputs, kitti) for rawOutputsSide, side in zip(rawOutputs, ('L',", "= collections.OrderedDict() imgL, imgR = batch.highResRGBs() for inputL, inputR, gts, process, side in", "self.optimizer.step() dispOuts = [] if returnOutputs: with torch.no_grad(): dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp * 2))", "gts, kitti=kitti) loss = sum([weight * loss for weight, loss in zip(weights, losses)", "F import torch.nn as nn from evaluation import evalFcn from utils import myUtils", "if gt is not None else None) return losses def trainOneSide(self, imgL, imgR,", "if returnOutputs: for suffix, output in zip(('High', 'Low'), outputsList): outputs['outputDisp' + suffix +", "import os import time import torch.optim as optim import torch import 
torch.nn.functional as", "gts, process, side in zip( (imgL, imgR), (imgR, imgL), zip(batch.highResDisps(), batch.lowResDisps()), (lambda im:", "return [loss.data.item() for loss in losses], dispOuts def train(self, batch, returnOutputs=False, kitti=False, weights=(1,", "scaled_loss.backward() self.optimizer.step() dispOuts = [] if returnOutputs: with torch.no_grad(): dispOuts.append(outDispHighs[2].detach() / (self.outputMaxDisp *", "// 2 self.getModel = RawPSMNetDown def loss(self, outputs, gts, kitti=False, outputMaxDisp=None): if outputMaxDisp", "imgR, gts, returnOutputs=False, kitti=False, weights=(1, 0)): self.optimizer.zero_grad() outDispHighs, outDispLows = self.model.forward(imgL, imgR) losses", "from .RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng from ..Model import Model from .. import", "lossesList, outputsList = self.trainOneSide( *process((inputL, inputR, gts)), returnOutputs=returnOutputs, kitti=kitti, weights=weights ) for suffix,", "import myUtils from .RawPSMNet import stackhourglass as rawPSMNet from .RawPSMNet_TieCheng import stackhourglass as", "if returnOutputs: if outDispHigh is not None: outputs['outputDispHigh' + side] = outDispHigh /", "outputs def test(self, batch, evalType='l1', returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch, 8) batch = myUtils.Batch(batch.highResRGBs() +", "test(self, batch, evalType='l1', returnOutputs=False, kitti=False): myUtils.assertBatchLen(batch, 8) batch = myUtils.Batch(batch.highResRGBs() + batch.lowestResDisps(), cuda=batch.cuda,", "myUtils from .RawPSMNet import stackhourglass as rawPSMNet from .RawPSMNet_TieCheng import stackhourglass as rawPSMNet_TieCheng", "multiple): super(RawPSMNetDown, self).__init__(maxdisp, dispScale, multiple) self.pool = nn.AvgPool2d((2, 2)) # input: RGB value", "in zip(rawOutputs, ('L', 'R')): if rawOutputsSide is not None: (outDispHigh, outDispLow) = rawOutputsSide" ]
[ "registered. def _ty_to_state_dict(v): return serialization.to_state_dict( {k: v for k, v in hk.data_structures.to_mutable_dict(v).items()}) def", "/= labels.shape[0] return softmax_xent @jax.jit def update(params, key, state, batch, meta_params): opt =", "state): logging.info(\"Restoring state %s:\", path) with filesystem.file_open(path, \"rb\") as fp: state_new = serialization.from_bytes(state,", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "remap_label = lambda x: {\"image\": x[\"image\"], \"label\": x[\"label\"]} def data(split): dataset = tfds.load(\"cifar10\",", "mlp = hk.Sequential([ hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu,", "filesystem.file_open(path, \"rb\") as fp: state_new = serialization.from_bytes(state, fp.read()) tree = jax.tree_structure(state) leaves_new =", "]) return mlp(x) @jax.jit def loss(params, key, batch): net = hk.transform(hk_forward_fn) logits =", "def _ty_to_state_dict(v): return serialization.to_state_dict( {k: v for k, v in hk.data_structures.to_mutable_dict(v).items()}) def _ty_from_state_dict(target,", "jax.tree_unflatten(tree, leaves_new) def get_data_iterators(fake_data=False): \"\"\"Get training and test data iterators.\"\"\" batch_size = 128", "state, params) new_params = optax.apply_updates(params, updates) return new_params, new_state, l def save_state(path, state):", "this file except in compliance with the License. # You may obtain a", "with filesystem.file_open(path, \"wb\") as fp: fp.write(serialization.to_bytes(state)) def load_state(path, state): logging.info(\"Restoring state %s:\", path)", "function for haiku.\"\"\" x = batch[\"image\"].astype(jnp.float32) / 255. 
mlp = hk.Sequential([ hk.Conv2D(64, (3,", "return softmax_xent @jax.jit def update(params, key, state, batch, meta_params): opt = optax.adam(meta_params[\"learning_rate\"]) l,", "# You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0", "tfds.load(\"cifar10\", split=split) iterator = iter( tfds.as_numpy( dataset.repeat(-1).shuffle( batch_size * 10).batch(batch_size).map(remap_label))) return iterator return", "jax.nn.one_hot(batch[\"label\"], 10) softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits)) softmax_xent /= labels.shape[0] return softmax_xent @jax.jit", "\"label\": x[\"label\"]} def data(split): dataset = tfds.load(\"cifar10\", split=split) iterator = iter( tfds.as_numpy( dataset.repeat(-1).shuffle(", "ANY KIND, either express or implied. # See the License for the specific", "/ 255. mlp = hk.Sequential([ hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3),", "l def save_state(path, state): filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path, \"wb\") as fp: fp.write(serialization.to_bytes(state)) def load_state(path,", "softmax_xent /= labels.shape[0] return softmax_xent @jax.jit def update(params, key, state, batch, meta_params): opt", "dataset.repeat(-1).shuffle( batch_size * 10).batch(batch_size).map(remap_label))) return iterator return data(\"train\"), data(\"test\") else: def data(): while", "License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "@jax.jit def update(params, key, state, batch, meta_params): opt = optax.adam(meta_params[\"learning_rate\"]) l, grad =", "filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path, \"wb\") as fp: fp.write(serialization.to_bytes(state)) def load_state(path, state): logging.info(\"Restoring state %s:\",", "{k: v for k, v in hk.data_structures.to_mutable_dict(v).items()}) def _ty_from_state_dict(target, d): return HKTree( **", "(3, 3), stride=1), jax.nn.relu, functools.partial(jnp.mean, axis=(1, 2)), hk.Linear(10), ]) return mlp(x) @jax.jit def", "load_state(path, state): logging.info(\"Restoring state %s:\", path) with filesystem.file_open(path, \"rb\") as fp: state_new =", "copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "leaves_new = jax.tree_leaves(state_new) return jax.tree_unflatten(tree, leaves_new) def get_data_iterators(fake_data=False): \"\"\"Get training and test data", "batch) labels = jax.nn.one_hot(batch[\"label\"], 10) softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits)) softmax_xent /= labels.shape[0]", "data(): while True: yield { \"image\": onp.zeros([batch_size, 32, 32, 3]), \"label\": onp.zeros([batch_size], dtype=onp.int32)", "return new_params, new_state, l def save_state(path, state): filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path, \"wb\") as fp:", "logging.info(\"Restoring state %s:\", path) with filesystem.file_open(path, \"rb\") as fp: state_new = serialization.from_bytes(state, fp.read())", "OF ANY KIND, either express or implied. 
# See the License for the", "Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0", "training and test data iterators.\"\"\" batch_size = 128 if not fake_data: remap_label =", "updates) return new_params, new_state, l def save_state(path, state): filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path, \"wb\") as", "data(\"test\") else: def data(): while True: yield { \"image\": onp.zeros([batch_size, 32, 32, 3]),", "LLC # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "(k, v) in d.items()}) serialization.register_serialization_state( HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True) def hk_forward_fn(batch): \"\"\"Forward function", "= 128 if not fake_data: remap_label = lambda x: {\"image\": x[\"image\"], \"label\": x[\"label\"]}", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "logging from flax import serialization import haiku as hk import jax import jax.numpy", "get_data_iterators(fake_data=False): \"\"\"Get training and test data iterators.\"\"\" batch_size = 128 if not fake_data:", "at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3,", "jax.numpy as jnp from learned_optimization import filesystem import numpy as onp import optax", "v) for (k, v) in d.items()}) serialization.register_serialization_state( HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True) def hk_forward_fn(batch):", "_ty_from_state_dict(target, d): return HKTree( ** {k: serialization.from_state_dict(target[k], v) for (k, v) in d.items()})", "= jax.value_and_grad(loss)(params, key, batch) updates, new_state = opt.update(grad, state, params) new_params = optax.apply_updates(params,", "iterator = iter( tfds.as_numpy( dataset.repeat(-1).shuffle( batch_size * 10).batch(batch_size).map(remap_label))) 
return iterator return data(\"train\"), data(\"test\")", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "batch) updates, new_state = opt.update(grad, state, params) new_params = optax.apply_updates(params, updates) return new_params,", "= hk.Sequential([ hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, hk.Conv2D(64,", "limitations under the License. \"\"\"Common code for the simple cnn example.\"\"\" import functools", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "the specific language governing permissions and # limitations under the License. \"\"\"Common code", "optax.apply_updates(params, updates) return new_params, new_state, l def save_state(path, state): filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path, \"wb\")", "required by applicable law or agreed to in writing, software # distributed under", "{k: serialization.from_state_dict(target[k], v) for (k, v) in d.items()}) serialization.register_serialization_state( HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True)", "serialization.from_bytes(state, fp.read()) tree = jax.tree_structure(state) leaves_new = jax.tree_leaves(state_new) return jax.tree_unflatten(tree, leaves_new) def get_data_iterators(fake_data=False):", "jax.tree_leaves(state_new) return jax.tree_unflatten(tree, leaves_new) def get_data_iterators(fake_data=False): \"\"\"Get training and test data iterators.\"\"\" batch_size", "applicable law or agreed to in writing, software # distributed under the License", "for (k, v) in d.items()}) serialization.register_serialization_state( HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True) def hk_forward_fn(batch): \"\"\"Forward", "labels = jax.nn.one_hot(batch[\"label\"], 10) softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits)) softmax_xent /= labels.shape[0] return", "x[\"label\"]} def data(split): dataset = tfds.load(\"cifar10\", 
split=split) iterator = iter( tfds.as_numpy( dataset.repeat(-1).shuffle( batch_size", "while True: yield { \"image\": onp.zeros([batch_size, 32, 32, 3]), \"label\": onp.zeros([batch_size], dtype=onp.int32) }", "in d.items()}) serialization.register_serialization_state( HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True) def hk_forward_fn(batch): \"\"\"Forward function for haiku.\"\"\"", "or agreed to in writing, software # distributed under the License is distributed", "x = batch[\"image\"].astype(jnp.float32) / 255. mlp = hk.Sequential([ hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu,", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "hk.Linear(10), ]) return mlp(x) @jax.jit def loss(params, key, batch): net = hk.transform(hk_forward_fn) logits", "batch_size * 10).batch(batch_size).map(remap_label))) return iterator return data(\"train\"), data(\"test\") else: def data(): while True:", "v for k, v in hk.data_structures.to_mutable_dict(v).items()}) def _ty_from_state_dict(target, d): return HKTree( ** {k:", "tree = jax.tree_structure(state) leaves_new = jax.tree_leaves(state_new) return jax.tree_unflatten(tree, leaves_new) def get_data_iterators(fake_data=False): \"\"\"Get training", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "tfds.as_numpy( dataset.repeat(-1).shuffle( batch_size * 10).batch(batch_size).map(remap_label))) return iterator return data(\"train\"), data(\"test\") else: def data():", "softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits)) softmax_xent /= labels.shape[0] return softmax_xent @jax.jit def update(params,", "iter( tfds.as_numpy( dataset.repeat(-1).shuffle( batch_size * 10).batch(batch_size).map(remap_label))) return iterator return data(\"train\"), data(\"test\") else: def", "simple cnn example.\"\"\" import functools import os from absl import logging from flax", 
"hk_forward_fn(batch): \"\"\"Forward function for haiku.\"\"\" x = batch[\"image\"].astype(jnp.float32) / 255. mlp = hk.Sequential([", "3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu,", "License. # You may obtain a copy of the License at # #", "jax.nn.relu, functools.partial(jnp.mean, axis=(1, 2)), hk.Linear(10), ]) return mlp(x) @jax.jit def loss(params, key, batch):", "= lambda x: {\"image\": x[\"image\"], \"label\": x[\"label\"]} def data(split): dataset = tfds.load(\"cifar10\", split=split)", "compliance with the License. # You may obtain a copy of the License", "filesystem import numpy as onp import optax import tensorflow_datasets as tfds HKTree =", "def save_state(path, state): filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path, \"wb\") as fp: fp.write(serialization.to_bytes(state)) def load_state(path, state):", "v in hk.data_structures.to_mutable_dict(v).items()}) def _ty_from_state_dict(target, d): return HKTree( ** {k: serialization.from_state_dict(target[k], v) for", "d): return HKTree( ** {k: serialization.from_state_dict(target[k], v) for (k, v) in d.items()}) serialization.register_serialization_state(", "\"wb\") as fp: fp.write(serialization.to_bytes(state)) def load_state(path, state): logging.info(\"Restoring state %s:\", path) with filesystem.file_open(path,", "filesystem.file_open(path, \"wb\") as fp: fp.write(serialization.to_bytes(state)) def load_state(path, state): logging.info(\"Restoring state %s:\", path) with", "axis=(1, 2)), hk.Linear(10), ]) return mlp(x) @jax.jit def loss(params, key, batch): net =", "not registered. 
def _ty_to_state_dict(v): return serialization.to_state_dict( {k: v for k, v in hk.data_structures.to_mutable_dict(v).items()})", "updates, new_state = opt.update(grad, state, params) new_params = optax.apply_updates(params, updates) return new_params, new_state,", "if not fake_data: remap_label = lambda x: {\"image\": x[\"image\"], \"label\": x[\"label\"]} def data(split):", "net.apply(params, key, batch) labels = jax.nn.one_hot(batch[\"label\"], 10) softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits)) softmax_xent", "def get_data_iterators(fake_data=False): \"\"\"Get training and test data iterators.\"\"\" batch_size = 128 if not", "and # limitations under the License. \"\"\"Common code for the simple cnn example.\"\"\"", "import filesystem import numpy as onp import optax import tensorflow_datasets as tfds HKTree", "data(\"train\"), data(\"test\") else: def data(): while True: yield { \"image\": onp.zeros([batch_size, 32, 32,", "return HKTree( ** {k: serialization.from_state_dict(target[k], v) for (k, v) in d.items()}) serialization.register_serialization_state( HKTree,", "# Copyright 2021 Google LLC # # Licensed under the Apache License, Version", "batch, meta_params): opt = optax.adam(meta_params[\"learning_rate\"]) l, grad = jax.value_and_grad(loss)(params, key, batch) updates, new_state", "flax for serialization but haiku's data struct is not registered. def _ty_to_state_dict(v): return", "serialization import haiku as hk import jax import jax.numpy as jnp from learned_optimization", "specific language governing permissions and # limitations under the License. \"\"\"Common code for", "not use this file except in compliance with the License. 
# You may", "import optax import tensorflow_datasets as tfds HKTree = hk.data_structures.to_immutable_dict({}).__class__ # We use flax", "License, Version 2.0 (the \"License\"); # you may not use this file except", "hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3),", "logits = net.apply(params, key, batch) labels = jax.nn.one_hot(batch[\"label\"], 10) softmax_xent = -jnp.sum(labels *", "labels.shape[0] return softmax_xent @jax.jit def update(params, key, state, batch, meta_params): opt = optax.adam(meta_params[\"learning_rate\"])", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "split=split) iterator = iter( tfds.as_numpy( dataset.repeat(-1).shuffle( batch_size * 10).batch(batch_size).map(remap_label))) return iterator return data(\"train\"),", "= jax.nn.one_hot(batch[\"label\"], 10) softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits)) softmax_xent /= labels.shape[0] return softmax_xent", "jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, functools.partial(jnp.mean, axis=(1, 2)), hk.Linear(10), ]) return mlp(x)", "_ty_to_state_dict, _ty_from_state_dict, override=True) def hk_forward_fn(batch): \"\"\"Forward function for haiku.\"\"\" x = batch[\"image\"].astype(jnp.float32) /", "hk import jax import jax.numpy as jnp from learned_optimization import filesystem import numpy", "@jax.jit def loss(params, key, batch): net = hk.transform(hk_forward_fn) logits = net.apply(params, key, batch)", "# # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "and test data iterators.\"\"\" batch_size = 128 if not fake_data: remap_label = lambda", "https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "# you may not use this file except in compliance with the License.", "data(split): dataset = tfds.load(\"cifar10\", split=split) iterator = iter( tfds.as_numpy( 
dataset.repeat(-1).shuffle( batch_size * 10).batch(batch_size).map(remap_label)))", "serialization.from_state_dict(target[k], v) for (k, v) in d.items()}) serialization.register_serialization_state( HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True) def", "serialization but haiku's data struct is not registered. def _ty_to_state_dict(v): return serialization.to_state_dict( {k:", "agreed to in writing, software # distributed under the License is distributed on", "{\"image\": x[\"image\"], \"label\": x[\"label\"]} def data(split): dataset = tfds.load(\"cifar10\", split=split) iterator = iter(", "save_state(path, state): filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path, \"wb\") as fp: fp.write(serialization.to_bytes(state)) def load_state(path, state): logging.info(\"Restoring", "def update(params, key, state, batch, meta_params): opt = optax.adam(meta_params[\"learning_rate\"]) l, grad = jax.value_and_grad(loss)(params,", "\"\"\"Forward function for haiku.\"\"\" x = batch[\"image\"].astype(jnp.float32) / 255. mlp = hk.Sequential([ hk.Conv2D(64,", "obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless", "(the \"License\"); # you may not use this file except in compliance with", "path) with filesystem.file_open(path, \"rb\") as fp: state_new = serialization.from_bytes(state, fp.read()) tree = jax.tree_structure(state)", "as fp: state_new = serialization.from_bytes(state, fp.read()) tree = jax.tree_structure(state) leaves_new = jax.tree_leaves(state_new) return", "functools import os from absl import logging from flax import serialization import haiku", "= -jnp.sum(labels * jax.nn.log_softmax(logits)) softmax_xent /= labels.shape[0] return softmax_xent @jax.jit def update(params, key,", "128 if not fake_data: remap_label = lambda x: {\"image\": x[\"image\"], \"label\": x[\"label\"]} def", "haiku.\"\"\" x = batch[\"image\"].astype(jnp.float32) / 255. 
mlp = hk.Sequential([ hk.Conv2D(64, (3, 3), stride=2),", "# Unless required by applicable law or agreed to in writing, software #", "opt.update(grad, state, params) new_params = optax.apply_updates(params, updates) return new_params, new_state, l def save_state(path,", "def loss(params, key, batch): net = hk.transform(hk_forward_fn) logits = net.apply(params, key, batch) labels", "by applicable law or agreed to in writing, software # distributed under the", "= batch[\"image\"].astype(jnp.float32) / 255. mlp = hk.Sequential([ hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64,", "import jax import jax.numpy as jnp from learned_optimization import filesystem import numpy as", "jax import jax.numpy as jnp from learned_optimization import filesystem import numpy as onp", "hk.Sequential([ hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, hk.Conv2D(64, (3,", "_ty_to_state_dict(v): return serialization.to_state_dict( {k: v for k, v in hk.data_structures.to_mutable_dict(v).items()}) def _ty_from_state_dict(target, d):", "10) softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits)) softmax_xent /= labels.shape[0] return softmax_xent @jax.jit def", "stride=1), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, functools.partial(jnp.mean,", "file except in compliance with the License. # You may obtain a copy", "batch[\"image\"].astype(jnp.float32) / 255. 
mlp = hk.Sequential([ hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3,", "import serialization import haiku as hk import jax import jax.numpy as jnp from", "License for the specific language governing permissions and # limitations under the License.", "optax import tensorflow_datasets as tfds HKTree = hk.data_structures.to_immutable_dict({}).__class__ # We use flax for", "to in writing, software # distributed under the License is distributed on an", "\"\"\"Get training and test data iterators.\"\"\" batch_size = 128 if not fake_data: remap_label", "is not registered. def _ty_to_state_dict(v): return serialization.to_state_dict( {k: v for k, v in", "implied. # See the License for the specific language governing permissions and #", "data struct is not registered. def _ty_to_state_dict(v): return serialization.to_state_dict( {k: v for k,", "\"License\"); # you may not use this file except in compliance with the", "not fake_data: remap_label = lambda x: {\"image\": x[\"image\"], \"label\": x[\"label\"]} def data(split): dataset", "hk.data_structures.to_mutable_dict(v).items()}) def _ty_from_state_dict(target, d): return HKTree( ** {k: serialization.from_state_dict(target[k], v) for (k, v)", "10).batch(batch_size).map(remap_label))) return iterator return data(\"train\"), data(\"test\") else: def data(): while True: yield {", "data iterators.\"\"\" batch_size = 128 if not fake_data: remap_label = lambda x: {\"image\":", "= optax.adam(meta_params[\"learning_rate\"]) l, grad = jax.value_and_grad(loss)(params, key, batch) updates, new_state = opt.update(grad, state,", "%s:\", path) with filesystem.file_open(path, \"rb\") as fp: state_new = serialization.from_bytes(state, fp.read()) tree =", "stride=1), jax.nn.relu, functools.partial(jnp.mean, axis=(1, 2)), hk.Linear(10), ]) return mlp(x) @jax.jit def loss(params, key,", "x: {\"image\": x[\"image\"], \"label\": x[\"label\"]} def data(split): dataset = tfds.load(\"cifar10\", split=split) iterator =", "as jnp 
from learned_optimization import filesystem import numpy as onp import optax import", "functools.partial(jnp.mean, axis=(1, 2)), hk.Linear(10), ]) return mlp(x) @jax.jit def loss(params, key, batch): net", "or implied. # See the License for the specific language governing permissions and", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "jax.tree_structure(state) leaves_new = jax.tree_leaves(state_new) return jax.tree_unflatten(tree, leaves_new) def get_data_iterators(fake_data=False): \"\"\"Get training and test", "k, v in hk.data_structures.to_mutable_dict(v).items()}) def _ty_from_state_dict(target, d): return HKTree( ** {k: serialization.from_state_dict(target[k], v)", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "in writing, software # distributed under the License is distributed on an \"AS", "flax import serialization import haiku as hk import jax import jax.numpy as jnp", "batch): net = hk.transform(hk_forward_fn) logits = net.apply(params, key, batch) labels = jax.nn.one_hot(batch[\"label\"], 10)", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "state, batch, meta_params): opt = optax.adam(meta_params[\"learning_rate\"]) l, grad = jax.value_and_grad(loss)(params, key, batch) updates,", "iterators.\"\"\" batch_size = 128 if not fake_data: remap_label = lambda x: {\"image\": x[\"image\"],", "v) in d.items()}) serialization.register_serialization_state( HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True) def hk_forward_fn(batch): \"\"\"Forward function for", "for haiku.\"\"\" x = batch[\"image\"].astype(jnp.float32) / 255. 
mlp = hk.Sequential([ hk.Conv2D(64, (3, 3),", "for the simple cnn example.\"\"\" import functools import os from absl import logging", "batch_size = 128 if not fake_data: remap_label = lambda x: {\"image\": x[\"image\"], \"label\":", "dataset = tfds.load(\"cifar10\", split=split) iterator = iter( tfds.as_numpy( dataset.repeat(-1).shuffle( batch_size * 10).batch(batch_size).map(remap_label))) return", "= jax.tree_structure(state) leaves_new = jax.tree_leaves(state_new) return jax.tree_unflatten(tree, leaves_new) def get_data_iterators(fake_data=False): \"\"\"Get training and", "{ \"image\": onp.zeros([batch_size, 32, 32, 3]), \"label\": onp.zeros([batch_size], dtype=onp.int32) } return data(), data()", "grad = jax.value_and_grad(loss)(params, key, batch) updates, new_state = opt.update(grad, state, params) new_params =", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "(3, 3), stride=1), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1),", "3), stride=1), jax.nn.relu, functools.partial(jnp.mean, axis=(1, 2)), hk.Linear(10), ]) return mlp(x) @jax.jit def loss(params,", "# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "opt = optax.adam(meta_params[\"learning_rate\"]) l, grad = jax.value_and_grad(loss)(params, key, batch) updates, new_state = opt.update(grad,", "import haiku as hk import jax import jax.numpy as jnp from learned_optimization import", "Google LLC # # Licensed under the Apache License, Version 2.0 (the \"License\");", "for serialization but haiku's data struct is not registered. 
def _ty_to_state_dict(v): return serialization.to_state_dict(", "softmax_xent @jax.jit def update(params, key, state, batch, meta_params): opt = optax.adam(meta_params[\"learning_rate\"]) l, grad", "hk.transform(hk_forward_fn) logits = net.apply(params, key, batch) labels = jax.nn.one_hot(batch[\"label\"], 10) softmax_xent = -jnp.sum(labels", "params) new_params = optax.apply_updates(params, updates) return new_params, new_state, l def save_state(path, state): filesystem.make_dirs(os.path.dirname(path))", "else: def data(): while True: yield { \"image\": onp.zeros([batch_size, 32, 32, 3]), \"label\":", "use this file except in compliance with the License. # You may obtain", "\"rb\") as fp: state_new = serialization.from_bytes(state, fp.read()) tree = jax.tree_structure(state) leaves_new = jax.tree_leaves(state_new)", "import tensorflow_datasets as tfds HKTree = hk.data_structures.to_immutable_dict({}).__class__ # We use flax for serialization", "** {k: serialization.from_state_dict(target[k], v) for (k, v) in d.items()}) serialization.register_serialization_state( HKTree, _ty_to_state_dict, _ty_from_state_dict,", "(3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, functools.partial(jnp.mean, axis=(1, 2)), hk.Linear(10),", "You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 #", "yield { \"image\": onp.zeros([batch_size, 32, 32, 3]), \"label\": onp.zeros([batch_size], dtype=onp.int32) } return data(),", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "fp: state_new = serialization.from_bytes(state, fp.read()) tree = jax.tree_structure(state) leaves_new = jax.tree_leaves(state_new) return jax.tree_unflatten(tree,", "return mlp(x) @jax.jit def loss(params, key, batch): net = hk.transform(hk_forward_fn) logits = net.apply(params,", "override=True) def hk_forward_fn(batch): \"\"\"Forward function for haiku.\"\"\" x = batch[\"image\"].astype(jnp.float32) / 255. 
mlp", "os from absl import logging from flax import serialization import haiku as hk", "2.0 (the \"License\"); # you may not use this file except in compliance", "2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the", "as hk import jax import jax.numpy as jnp from learned_optimization import filesystem import", "onp import optax import tensorflow_datasets as tfds HKTree = hk.data_structures.to_immutable_dict({}).__class__ # We use", "-jnp.sum(labels * jax.nn.log_softmax(logits)) softmax_xent /= labels.shape[0] return softmax_xent @jax.jit def update(params, key, state,", "3), stride=1), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu,", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "numpy as onp import optax import tensorflow_datasets as tfds HKTree = hk.data_structures.to_immutable_dict({}).__class__ #", "key, batch) labels = jax.nn.one_hot(batch[\"label\"], 10) softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits)) softmax_xent /=", "_ty_from_state_dict, override=True) def hk_forward_fn(batch): \"\"\"Forward function for haiku.\"\"\" x = batch[\"image\"].astype(jnp.float32) / 255.", "# # Unless required by applicable law or agreed to in writing, software", "for the specific language governing permissions and # limitations under the License. \"\"\"Common", "255. mlp = hk.Sequential([ hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1),", "key, batch) updates, new_state = opt.update(grad, state, params) new_params = optax.apply_updates(params, updates) return", "express or implied. # See the License for the specific language governing permissions", "struct is not registered. 
def _ty_to_state_dict(v): return serialization.to_state_dict( {k: v for k, v", "hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, functools.partial(jnp.mean, axis=(1, 2)),", "the simple cnn example.\"\"\" import functools import os from absl import logging from", "the License. \"\"\"Common code for the simple cnn example.\"\"\" import functools import os", "either express or implied. # See the License for the specific language governing", "import os from absl import logging from flax import serialization import haiku as", "serialization.register_serialization_state( HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True) def hk_forward_fn(batch): \"\"\"Forward function for haiku.\"\"\" x =", "fp.read()) tree = jax.tree_structure(state) leaves_new = jax.tree_leaves(state_new) return jax.tree_unflatten(tree, leaves_new) def get_data_iterators(fake_data=False): \"\"\"Get", "import logging from flax import serialization import haiku as hk import jax import", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "new_params = optax.apply_updates(params, updates) return new_params, new_state, l def save_state(path, state): filesystem.make_dirs(os.path.dirname(path)) with", "optax.adam(meta_params[\"learning_rate\"]) l, grad = jax.value_and_grad(loss)(params, key, batch) updates, new_state = opt.update(grad, state, params)", "jax.nn.log_softmax(logits)) softmax_xent /= labels.shape[0] return softmax_xent @jax.jit def update(params, key, state, batch, meta_params):", "hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, functools.partial(jnp.mean, axis=(1, 2)), hk.Linear(10), ]) return mlp(x) @jax.jit", "* jax.nn.log_softmax(logits)) softmax_xent /= labels.shape[0] return softmax_xent @jax.jit def update(params, key, state, batch,", "serialization.to_state_dict( {k: v for k, v in hk.data_structures.to_mutable_dict(v).items()}) 
def _ty_from_state_dict(target, d): return HKTree(", "the License. # You may obtain a copy of the License at #", "leaves_new) def get_data_iterators(fake_data=False): \"\"\"Get training and test data iterators.\"\"\" batch_size = 128 if", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "def _ty_from_state_dict(target, d): return HKTree( ** {k: serialization.from_state_dict(target[k], v) for (k, v) in", "lambda x: {\"image\": x[\"image\"], \"label\": x[\"label\"]} def data(split): dataset = tfds.load(\"cifar10\", split=split) iterator", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "return serialization.to_state_dict( {k: v for k, v in hk.data_structures.to_mutable_dict(v).items()}) def _ty_from_state_dict(target, d): return", "tensorflow_datasets as tfds HKTree = hk.data_structures.to_immutable_dict({}).__class__ # We use flax for serialization but", "new_state, l def save_state(path, state): filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path, \"wb\") as fp: fp.write(serialization.to_bytes(state)) def", "fake_data: remap_label = lambda x: {\"image\": x[\"image\"], \"label\": x[\"label\"]} def data(split): dataset =", "as fp: fp.write(serialization.to_bytes(state)) def load_state(path, state): logging.info(\"Restoring state %s:\", path) with filesystem.file_open(path, \"rb\")", "with the License. 
# You may obtain a copy of the License at", "fp.write(serialization.to_bytes(state)) def load_state(path, state): logging.info(\"Restoring state %s:\", path) with filesystem.file_open(path, \"rb\") as fp:", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "HKTree( ** {k: serialization.from_state_dict(target[k], v) for (k, v) in d.items()}) serialization.register_serialization_state( HKTree, _ty_to_state_dict,", "return jax.tree_unflatten(tree, leaves_new) def get_data_iterators(fake_data=False): \"\"\"Get training and test data iterators.\"\"\" batch_size =", "haiku as hk import jax import jax.numpy as jnp from learned_optimization import filesystem", "may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # #", "under the License. \"\"\"Common code for the simple cnn example.\"\"\" import functools import", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "as tfds HKTree = hk.data_structures.to_immutable_dict({}).__class__ # We use flax for serialization but haiku's", "test data iterators.\"\"\" batch_size = 128 if not fake_data: remap_label = lambda x:", "new_state = opt.update(grad, state, params) new_params = optax.apply_updates(params, updates) return new_params, new_state, l", "= tfds.load(\"cifar10\", split=split) iterator = iter( tfds.as_numpy( dataset.repeat(-1).shuffle( batch_size * 10).batch(batch_size).map(remap_label))) return iterator", "= iter( tfds.as_numpy( dataset.repeat(-1).shuffle( batch_size * 10).batch(batch_size).map(remap_label))) return iterator return data(\"train\"), data(\"test\") else:", "as onp import optax import tensorflow_datasets as tfds HKTree = hk.data_structures.to_immutable_dict({}).__class__ # We", "= opt.update(grad, state, params) new_params = optax.apply_updates(params, updates) return new_params, new_state, l def", "d.items()}) 
serialization.register_serialization_state( HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True) def hk_forward_fn(batch): \"\"\"Forward function for haiku.\"\"\" x", "haiku's data struct is not registered. def _ty_to_state_dict(v): return serialization.to_state_dict( {k: v for", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "2)), hk.Linear(10), ]) return mlp(x) @jax.jit def loss(params, key, batch): net = hk.transform(hk_forward_fn)", "License. \"\"\"Common code for the simple cnn example.\"\"\" import functools import os from", "code for the simple cnn example.\"\"\" import functools import os from absl import", "hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, hk.Conv2D(64, (3, 3),", "key, state, batch, meta_params): opt = optax.adam(meta_params[\"learning_rate\"]) l, grad = jax.value_and_grad(loss)(params, key, batch)", "True: yield { \"image\": onp.zeros([batch_size, 32, 32, 3]), \"label\": onp.zeros([batch_size], dtype=onp.int32) } return", "update(params, key, state, batch, meta_params): opt = optax.adam(meta_params[\"learning_rate\"]) l, grad = jax.value_and_grad(loss)(params, key,", "l, grad = jax.value_and_grad(loss)(params, key, batch) updates, new_state = opt.update(grad, state, params) new_params", "governing permissions and # limitations under the License. \"\"\"Common code for the simple", "from flax import serialization import haiku as hk import jax import jax.numpy as", "cnn example.\"\"\" import functools import os from absl import logging from flax import", "net = hk.transform(hk_forward_fn) logits = net.apply(params, key, batch) labels = jax.nn.one_hot(batch[\"label\"], 10) softmax_xent", "tfds HKTree = hk.data_structures.to_immutable_dict({}).__class__ # We use flax for serialization but haiku's data", "in compliance with the License. 
# You may obtain a copy of the", "in hk.data_structures.to_mutable_dict(v).items()}) def _ty_from_state_dict(target, d): return HKTree( ** {k: serialization.from_state_dict(target[k], v) for (k,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "import functools import os from absl import logging from flax import serialization import", "= hk.data_structures.to_immutable_dict({}).__class__ # We use flax for serialization but haiku's data struct is", "= optax.apply_updates(params, updates) return new_params, new_state, l def save_state(path, state): filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path,", "use flax for serialization but haiku's data struct is not registered. def _ty_to_state_dict(v):", "import jax.numpy as jnp from learned_optimization import filesystem import numpy as onp import", "See the License for the specific language governing permissions and # limitations under", "= serialization.from_bytes(state, fp.read()) tree = jax.tree_structure(state) leaves_new = jax.tree_leaves(state_new) return jax.tree_unflatten(tree, leaves_new) def", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, functools.partial(jnp.mean, axis=(1, 2)), hk.Linear(10), ])", "fp: fp.write(serialization.to_bytes(state)) def load_state(path, state): logging.info(\"Restoring state %s:\", path) with filesystem.file_open(path, \"rb\") as", "= jax.tree_leaves(state_new) return jax.tree_unflatten(tree, leaves_new) def get_data_iterators(fake_data=False): \"\"\"Get training and test data iterators.\"\"\"", "meta_params): opt = optax.adam(meta_params[\"learning_rate\"]) l, grad = jax.value_and_grad(loss)(params, key, batch) updates, new_state =", "return data(\"train\"), data(\"test\") else: def data(): while True: yield { \"image\": onp.zeros([batch_size, 32,", "key, batch): net = hk.transform(hk_forward_fn) logits = 
net.apply(params, key, batch) labels = jax.nn.one_hot(batch[\"label\"],", "import numpy as onp import optax import tensorflow_datasets as tfds HKTree = hk.data_structures.to_immutable_dict({}).__class__", "hk.data_structures.to_immutable_dict({}).__class__ # We use flax for serialization but haiku's data struct is not", "of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "= hk.transform(hk_forward_fn) logits = net.apply(params, key, batch) labels = jax.nn.one_hot(batch[\"label\"], 10) softmax_xent =", "= net.apply(params, key, batch) labels = jax.nn.one_hot(batch[\"label\"], 10) softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))", "def load_state(path, state): logging.info(\"Restoring state %s:\", path) with filesystem.file_open(path, \"rb\") as fp: state_new", "def data(): while True: yield { \"image\": onp.zeros([batch_size, 32, 32, 3]), \"label\": onp.zeros([batch_size],", "from learned_optimization import filesystem import numpy as onp import optax import tensorflow_datasets as", "Version 2.0 (the \"License\"); # you may not use this file except in", "state %s:\", path) with filesystem.file_open(path, \"rb\") as fp: state_new = serialization.from_bytes(state, fp.read()) tree", "except in compliance with the License. 
# You may obtain a copy of", "new_params, new_state, l def save_state(path, state): filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path, \"wb\") as fp: fp.write(serialization.to_bytes(state))", "state_new = serialization.from_bytes(state, fp.read()) tree = jax.tree_structure(state) leaves_new = jax.tree_leaves(state_new) return jax.tree_unflatten(tree, leaves_new)", "def data(split): dataset = tfds.load(\"cifar10\", split=split) iterator = iter( tfds.as_numpy( dataset.repeat(-1).shuffle( batch_size *", "x[\"image\"], \"label\": x[\"label\"]} def data(split): dataset = tfds.load(\"cifar10\", split=split) iterator = iter( tfds.as_numpy(", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "(3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=2),", "\"\"\"Common code for the simple cnn example.\"\"\" import functools import os from absl", "HKTree = hk.data_structures.to_immutable_dict({}).__class__ # We use flax for serialization but haiku's data struct", "learned_optimization import filesystem import numpy as onp import optax import tensorflow_datasets as tfds", "stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64,", "with filesystem.file_open(path, \"rb\") as fp: state_new = serialization.from_bytes(state, fp.read()) tree = jax.tree_structure(state) leaves_new", "language governing permissions and # limitations under the License. \"\"\"Common code for the", "# coding=utf-8 # Copyright 2021 Google LLC # # Licensed under the Apache", "a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "# We use flax for serialization but haiku's data struct is not registered.", "but haiku's data struct is not registered. 
def _ty_to_state_dict(v): return serialization.to_state_dict( {k: v", "for k, v in hk.data_structures.to_mutable_dict(v).items()}) def _ty_from_state_dict(target, d): return HKTree( ** {k: serialization.from_state_dict(target[k],", "state): filesystem.make_dirs(os.path.dirname(path)) with filesystem.file_open(path, \"wb\") as fp: fp.write(serialization.to_bytes(state)) def load_state(path, state): logging.info(\"Restoring state", "jax.value_and_grad(loss)(params, key, batch) updates, new_state = opt.update(grad, state, params) new_params = optax.apply_updates(params, updates)", "iterator return data(\"train\"), data(\"test\") else: def data(): while True: yield { \"image\": onp.zeros([batch_size,", "jnp from learned_optimization import filesystem import numpy as onp import optax import tensorflow_datasets", "def hk_forward_fn(batch): \"\"\"Forward function for haiku.\"\"\" x = batch[\"image\"].astype(jnp.float32) / 255. mlp =", "HKTree, _ty_to_state_dict, _ty_from_state_dict, override=True) def hk_forward_fn(batch): \"\"\"Forward function for haiku.\"\"\" x = batch[\"image\"].astype(jnp.float32)", "# limitations under the License. \"\"\"Common code for the simple cnn example.\"\"\" import", "jax.nn.relu, hk.Conv2D(64, (3, 3), stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, functools.partial(jnp.mean, axis=(1,", "* 10).batch(batch_size).map(remap_label))) return iterator return data(\"train\"), data(\"test\") else: def data(): while True: yield", "permissions and # limitations under the License. \"\"\"Common code for the simple cnn", "coding=utf-8 # Copyright 2021 Google LLC # # Licensed under the Apache License,", "stride=2), jax.nn.relu, hk.Conv2D(64, (3, 3), stride=1), jax.nn.relu, functools.partial(jnp.mean, axis=(1, 2)), hk.Linear(10), ]) return", "We use flax for serialization but haiku's data struct is not registered. 
def", "example.\"\"\" import functools import os from absl import logging from flax import serialization", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "from absl import logging from flax import serialization import haiku as hk import", "return iterator return data(\"train\"), data(\"test\") else: def data(): while True: yield { \"image\":", "loss(params, key, batch): net = hk.transform(hk_forward_fn) logits = net.apply(params, key, batch) labels =", "mlp(x) @jax.jit def loss(params, key, batch): net = hk.transform(hk_forward_fn) logits = net.apply(params, key,", "absl import logging from flax import serialization import haiku as hk import jax" ]
[ "# @Project : robust_python # @File : zero_copy_prc1.py # @Version : V0.0.1 #", "#!/user/bin/env python # -*-coding:utf-8 -*- # @CreateTime : 2021/10/25 22:52 # @Author :", "# @File : zero_copy_prc1.py # @Version : V0.0.1 # @Desc : ? #", "view: \", chunk.tobytes()) print(\"Underlying data: \", chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像", "它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值 my_array = bytearray(b'hello') my_array[0] = 0x79 print(my_array) \"\"\" bytearray 与", "\"\"\" bytearray 与 bytes 一样,也可以用 memoryview 封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview", "一样,也可以用 memoryview 封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray, 而不像 bytes", "拆散再拼起来 \"\"\" my_array = bytearray(b'row, row, row your boat') my_view = memoryview(my_array) write_view", "chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值 my_array = bytearray(b'hello') my_array[0]", "my_array = bytearray(b'row, row, row your boat') my_view = memoryview(my_array) write_view = my_view[3:13]", "# @CreateTime : 2021/10/25 22:52 # @Author : xujiahui # @Project : robust_python", "则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值 my_array = bytearray(b'hello') my_array[0] = 0x79 print(my_array) \"\"\"", "背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray, 而不像 bytes 那样,必须先将 bytes 拆散再拼起来 \"\"\" my_array = bytearray(b'row,", "memoryview 来实现零拷贝 import timeit data = b\"shave and a haircut, two bits\" view", ": V0.0.1 # @Desc : ? 
# 借助于 memoryview 来实现零拷贝 import timeit data", "data: \", chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值 my_array =", "write_view[:] = b'-10 bytes-' print(my_array) \"\"\" Python 里面很多库之中的方法,例如 socket.recv_into 与 RawIOBase.readinto,都使用缓冲协议来迅速接受或读取数据。 这种方法的好处是不用分配内存,也不用给原数据制作副本,它们会把收到的内容直接写入现有的缓冲区。 \"\"\"", "my_array[0] = 0x79 print(my_array) \"\"\" bytearray 与 bytes 一样,也可以用 memoryview 封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换,", "= view[12:19] print(chunk) print(\"Size: \", chunk.nbytes) print(\"Data in view: \", chunk.tobytes()) print(\"Underlying data:", "import timeit data = b\"shave and a haircut, two bits\" view = memoryview(data)", "来实现零拷贝 import timeit data = b\"shave and a haircut, two bits\" view =", "= memoryview(data) chunk = view[12:19] print(chunk) print(\"Size: \", chunk.nbytes) print(\"Data in view: \",", "row your boat') my_view = memoryview(my_array) write_view = my_view[3:13] write_view[:] = b'-10 bytes-'", "= b\"shave and a haircut, two bits\" view = memoryview(data) chunk = view[12:19]", "robust_python # @File : zero_copy_prc1.py # @Version : V0.0.1 # @Desc : ?", "= bytearray(b'row, row, row your boat') my_view = memoryview(my_array) write_view = my_view[3:13] write_view[:]", "zero_copy_prc1.py # @Version : V0.0.1 # @Desc : ? 
# 借助于 memoryview 来实现零拷贝", "# @Author : xujiahui # @Project : robust_python # @File : zero_copy_prc1.py #", "print(\"Underlying data: \", chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值 my_array", "bytes 拆散再拼起来 \"\"\" my_array = bytearray(b'row, row, row your boat') my_view = memoryview(my_array)", "= 0x79 print(my_array) \"\"\" bytearray 与 bytes 一样,也可以用 memoryview 封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是", "来修改它所封装的 bytearray, 而不像 bytes 那样,必须先将 bytes 拆散再拼起来 \"\"\" my_array = bytearray(b'row, row, row", "借助于 memoryview 来实现零拷贝 import timeit data = b\"shave and a haircut, two bits\"", "那样,必须先将 bytes 拆散再拼起来 \"\"\" my_array = bytearray(b'row, row, row your boat') my_view =", "write_view = my_view[3:13] write_view[:] = b'-10 bytes-' print(my_array) \"\"\" Python 里面很多库之中的方法,例如 socket.recv_into 与", "@File : zero_copy_prc1.py # @Version : V0.0.1 # @Desc : ? # 借助于", "timeit data = b\"shave and a haircut, two bits\" view = memoryview(data) chunk", "那样,采用b开头的字面值 my_array = bytearray(b'hello') my_array[0] = 0x79 print(my_array) \"\"\" bytearray 与 bytes 一样,也可以用", "而不像 bytes 那样,必须先将 bytes 拆散再拼起来 \"\"\" my_array = bytearray(b'row, row, row your boat')", "bytearray 与 bytes 一样,也可以用 memoryview 封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的", "xujiahui # @Project : robust_python # @File : zero_copy_prc1.py # @Version : V0.0.1", "与 bytes 一样,也可以用 memoryview 封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray,", "22:52 # @Author : xujiahui # @Project : robust_python # @File : zero_copy_prc1.py", "and a haircut, two bits\" view = memoryview(data) chunk = view[12:19] print(chunk) print(\"Size:", "<gh_stars>0 #!/user/bin/env python # -*-coding:utf-8 -*- # @CreateTime : 2021/10/25 22:52 # @Author", "memoryview 封装,在这种 memoryview 
上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray, 而不像 bytes 那样,必须先将", "your boat') my_view = memoryview(my_array) write_view = my_view[3:13] write_view[:] = b'-10 bytes-' print(my_array)", "封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray, 而不像 bytes 那样,必须先将 bytes", ": 2021/10/25 22:52 # @Author : xujiahui # @Project : robust_python # @File", "boat') my_view = memoryview(my_array) write_view = my_view[3:13] write_view[:] = b'-10 bytes-' print(my_array) \"\"\"", "0x79 print(my_array) \"\"\" bytearray 与 bytes 一样,也可以用 memoryview 封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview", "memoryview 来修改它所封装的 bytearray, 而不像 bytes 那样,必须先将 bytes 拆散再拼起来 \"\"\" my_array = bytearray(b'row, row,", "V0.0.1 # @Desc : ? # 借助于 memoryview 来实现零拷贝 import timeit data =", "# @Version : V0.0.1 # @Desc : ? # 借助于 memoryview 来实现零拷贝 import", "@Version : V0.0.1 # @Desc : ? # 借助于 memoryview 来实现零拷贝 import timeit", "这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray, 而不像 bytes 那样,必须先将 bytes 拆散再拼起来 \"\"\" my_array", "memoryview 上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray, 而不像 bytes 那样,必须先将 bytes 拆散再拼起来", "# bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值 my_array = bytearray(b'hello') my_array[0] =", "my_view = memoryview(my_array) write_view = my_view[3:13] write_view[:] = b'-10 bytes-' print(my_array) \"\"\" Python", "\", chunk.tobytes()) print(\"Underlying data: \", chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes", "row, row your boat') my_view = memoryview(my_array) write_view = my_view[3:13] write_view[:] = b'-10", "@Project : robust_python # @File : zero_copy_prc1.py # @Version : V0.0.1 # @Desc", 
"memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray, 而不像 bytes 那样,必须先将 bytes 拆散再拼起来 \"\"\" my_array =", "bytearray(b'row, row, row your boat') my_view = memoryview(my_array) write_view = my_view[3:13] write_view[:] =", "print(\"Data in view: \", chunk.tobytes()) print(\"Underlying data: \", chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes,", "my_view[3:13] write_view[:] = b'-10 bytes-' print(my_array) \"\"\" Python 里面很多库之中的方法,例如 socket.recv_into 与 RawIOBase.readinto,都使用缓冲协议来迅速接受或读取数据。 这种方法的好处是不用分配内存,也不用给原数据制作副本,它们会把收到的内容直接写入现有的缓冲区。", "# 借助于 memoryview 来实现零拷贝 import timeit data = b\"shave and a haircut, two", "python # -*-coding:utf-8 -*- # @CreateTime : 2021/10/25 22:52 # @Author : xujiahui", "print(\"Size: \", chunk.nbytes) print(\"Data in view: \", chunk.tobytes()) print(\"Underlying data: \", chunk.obj) #", "view[12:19] print(chunk) print(\"Size: \", chunk.nbytes) print(\"Data in view: \", chunk.tobytes()) print(\"Underlying data: \",", "\"\"\" my_array = bytearray(b'row, row, row your boat') my_view = memoryview(my_array) write_view =", "-*- # @CreateTime : 2021/10/25 22:52 # @Author : xujiahui # @Project :", "view = memoryview(data) chunk = view[12:19] print(chunk) print(\"Size: \", chunk.nbytes) print(\"Data in view:", "= bytearray(b'hello') my_array[0] = 0x79 print(my_array) \"\"\" bytearray 与 bytes 一样,也可以用 memoryview 封装,在这种", "\", chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值 my_array = bytearray(b'hello')", "\", chunk.nbytes) print(\"Data in view: \", chunk.tobytes()) print(\"Underlying data: \", chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而", "bytearray 则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值 my_array = bytearray(b'hello') my_array[0] = 0x79 print(my_array)", "bytes 那样,采用b开头的字面值 my_array = bytearray(b'hello') my_array[0] = 0x79 print(my_array) \"\"\" bytearray 与 
bytes", "# @Desc : ? # 借助于 memoryview 来实现零拷贝 import timeit data = b\"shave", "bytearray, 而不像 bytes 那样,必须先将 bytes 拆散再拼起来 \"\"\" my_array = bytearray(b'row, row, row your", "my_array = bytearray(b'hello') my_array[0] = 0x79 print(my_array) \"\"\" bytearray 与 bytes 一样,也可以用 memoryview", "in view: \", chunk.tobytes()) print(\"Underlying data: \", chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes, #", "a haircut, two bits\" view = memoryview(data) chunk = view[12:19] print(chunk) print(\"Size: \",", "print(my_array) \"\"\" bytearray 与 bytes 一样,也可以用 memoryview 封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过", "memoryview(data) chunk = view[12:19] print(chunk) print(\"Size: \", chunk.nbytes) print(\"Data in view: \", chunk.tobytes())", ": ? # 借助于 memoryview 来实现零拷贝 import timeit data = b\"shave and a", "chunk = view[12:19] print(chunk) print(\"Size: \", chunk.nbytes) print(\"Data in view: \", chunk.tobytes()) print(\"Underlying", "2021/10/25 22:52 # @Author : xujiahui # @Project : robust_python # @File :", ": zero_copy_prc1.py # @Version : V0.0.1 # @Desc : ? # 借助于 memoryview", "data = b\"shave and a haircut, two bits\" view = memoryview(data) chunk =", "bits\" view = memoryview(data) chunk = view[12:19] print(chunk) print(\"Size: \", chunk.nbytes) print(\"Data in", "haircut, two bits\" view = memoryview(data) chunk = view[12:19] print(chunk) print(\"Size: \", chunk.nbytes)", "-*-coding:utf-8 -*- # @CreateTime : 2021/10/25 22:52 # @Author : xujiahui # @Project", "上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray, 而不像 bytes 那样,必须先将 bytes 拆散再拼起来 \"\"\"", "bytes 那样,必须先将 bytes 拆散再拼起来 \"\"\" my_array = bytearray(b'row, row, row your boat') my_view", "@Desc : ? 
# 借助于 memoryview 来实现零拷贝 import timeit data = b\"shave and", "= memoryview(my_array) write_view = my_view[3:13] write_view[:] = b'-10 bytes-' print(my_array) \"\"\" Python 里面很多库之中的方法,例如", "bytes 一样,也可以用 memoryview 封装,在这种 memoryview 上面切割出来的对象,其内容可以用另一份数据替换, 这样做,替换的实际上是 memoryview 背后那个底层缓冲区里面的相应部分。这使得我们可以通过 memoryview 来修改它所封装的 bytearray, 而不像", "print(chunk) print(\"Size: \", chunk.nbytes) print(\"Data in view: \", chunk.tobytes()) print(\"Underlying data: \", chunk.obj)", "bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值 my_array = bytearray(b'hello') my_array[0] = 0x79", "chunk.tobytes()) print(\"Underlying data: \", chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray 则相当于可修改的bytes, # 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值", "@CreateTime : 2021/10/25 22:52 # @Author : xujiahui # @Project : robust_python #", "memoryview(my_array) write_view = my_view[3:13] write_view[:] = b'-10 bytes-' print(my_array) \"\"\" Python 里面很多库之中的方法,例如 socket.recv_into", "= my_view[3:13] write_view[:] = b'-10 bytes-' print(my_array) \"\"\" Python 里面很多库之中的方法,例如 socket.recv_into 与 RawIOBase.readinto,都使用缓冲协议来迅速接受或读取数据。", "# 它允许我们修改任意位置上面的内容,bytearray采用整数表示其中的内容,而不像 bytes 那样,采用b开头的字面值 my_array = bytearray(b'hello') my_array[0] = 0x79 print(my_array) \"\"\" bytearray", ": robust_python # @File : zero_copy_prc1.py # @Version : V0.0.1 # @Desc :", "@Author : xujiahui # @Project : robust_python # @File : zero_copy_prc1.py # @Version", ": xujiahui # @Project : robust_python # @File : zero_copy_prc1.py # @Version :", "? 
# 借助于 memoryview 来实现零拷贝 import timeit data = b\"shave and a haircut,", "two bits\" view = memoryview(data) chunk = view[12:19] print(chunk) print(\"Size: \", chunk.nbytes) print(\"Data", "bytearray(b'hello') my_array[0] = 0x79 print(my_array) \"\"\" bytearray 与 bytes 一样,也可以用 memoryview 封装,在这种 memoryview", "# -*-coding:utf-8 -*- # @CreateTime : 2021/10/25 22:52 # @Author : xujiahui #", "b\"shave and a haircut, two bits\" view = memoryview(data) chunk = view[12:19] print(chunk)", "chunk.nbytes) print(\"Data in view: \", chunk.tobytes()) print(\"Underlying data: \", chunk.obj) # bytes有个限制,就是只能读取不能修改,我们不能单独更新其中某个位置上的字节,而 bytearray" ]
[ "= [None for i in range(capacity)] def next_slot(self) -> int: for i in", "is None: return i return -1 def append(self, index: int=-1): spot = index", "= item def input(key): if key == input_handler.Keys.escape: application.quit() if __name__ == '__main__':", "capacity self.scale=(capacity/10, 0.1) self.position=(-capacity/20, -0.35) self.texture_scale= (capacity, 1) self.item_parent = Entity(parent=self, scale=(1/capacity, 1))", "None: return i return -1 def append(self, index: int=-1): spot = index if", "index if index>=0 else self.next_slot() if spot >= 0: item = Button(parent=self.item_parent, model", "\"quad\", color=color.random_color(), position=(spot, 0), origin=(-0.5, 0.5)) self.spots[spot] = item def input(key): if key", "capacity): super(Inventory, self).__init__( parent=camera.ui, model='quad', origin=(-0.5, 0.5), texture=\"white_cube\", color=color.dark_gray ) self.capacity = capacity", "1)) self.spots = [None for i in range(capacity)] def next_slot(self) -> int: for", "= \"quad\", color=color.random_color(), position=(spot, 0), origin=(-0.5, 0.5)) self.spots[spot] = item def input(key): if", "0: item = Button(parent=self.item_parent, model = \"quad\", color=color.random_color(), position=(spot, 0), origin=(-0.5, 0.5)) self.spots[spot]", "self.scale=(capacity/10, 0.1) self.position=(-capacity/20, -0.35) self.texture_scale= (capacity, 1) self.item_parent = Entity(parent=self, scale=(1/capacity, 1)) self.spots", "self.next_slot() if spot >= 0: item = Button(parent=self.item_parent, model = \"quad\", color=color.random_color(), position=(spot,", "in range(capacity)] def next_slot(self) -> int: for i in range(self.capacity): if self.spots[i] is", "for i in range(self.capacity): if self.spots[i] is None: return i return -1 def", "spot >= 0: item = Button(parent=self.item_parent, model = \"quad\", color=color.random_color(), position=(spot, 0), origin=(-0.5,", "self.capacity = capacity self.scale=(capacity/10, 0.1) self.position=(-capacity/20, -0.35) 
self.texture_scale= (capacity, 1) self.item_parent = Entity(parent=self,", "color=color.dark_gray ) self.capacity = capacity self.scale=(capacity/10, 0.1) self.position=(-capacity/20, -0.35) self.texture_scale= (capacity, 1) self.item_parent", "self).__init__( parent=camera.ui, model='quad', origin=(-0.5, 0.5), texture=\"white_cube\", color=color.dark_gray ) self.capacity = capacity self.scale=(capacity/10, 0.1)", "from ursina import * class Inventory(Entity): def __init__(self, capacity): super(Inventory, self).__init__( parent=camera.ui, model='quad',", "self.spots[spot] = item def input(key): if key == input_handler.Keys.escape: application.quit() if __name__ ==", "self.spots[i] is None: return i return -1 def append(self, index: int=-1): spot =", "texture=\"white_cube\", color=color.dark_gray ) self.capacity = capacity self.scale=(capacity/10, 0.1) self.position=(-capacity/20, -0.35) self.texture_scale= (capacity, 1)", "index>=0 else self.next_slot() if spot >= 0: item = Button(parent=self.item_parent, model = \"quad\",", "class Inventory(Entity): def __init__(self, capacity): super(Inventory, self).__init__( parent=camera.ui, model='quad', origin=(-0.5, 0.5), texture=\"white_cube\", color=color.dark_gray", "position=(spot, 0), origin=(-0.5, 0.5)) self.spots[spot] = item def input(key): if key == input_handler.Keys.escape:", "self.texture_scale= (capacity, 1) self.item_parent = Entity(parent=self, scale=(1/capacity, 1)) self.spots = [None for i", "= index if index>=0 else self.next_slot() if spot >= 0: item = Button(parent=self.item_parent,", "super(Inventory, self).__init__( parent=camera.ui, model='quad', origin=(-0.5, 0.5), texture=\"white_cube\", color=color.dark_gray ) self.capacity = capacity self.scale=(capacity/10,", "def input(key): if key == input_handler.Keys.escape: application.quit() if __name__ == '__main__': app =", "item = Button(parent=self.item_parent, model = \"quad\", color=color.random_color(), position=(spot, 0), origin=(-0.5, 0.5)) 
self.spots[spot] =", "input(key): if key == input_handler.Keys.escape: application.quit() if __name__ == '__main__': app = Ursina()", "model='quad', origin=(-0.5, 0.5), texture=\"white_cube\", color=color.dark_gray ) self.capacity = capacity self.scale=(capacity/10, 0.1) self.position=(-capacity/20, -0.35)", "(capacity, 1) self.item_parent = Entity(parent=self, scale=(1/capacity, 1)) self.spots = [None for i in", "-> int: for i in range(self.capacity): if self.spots[i] is None: return i return", "def append(self, index: int=-1): spot = index if index>=0 else self.next_slot() if spot", "def __init__(self, capacity): super(Inventory, self).__init__( parent=camera.ui, model='quad', origin=(-0.5, 0.5), texture=\"white_cube\", color=color.dark_gray ) self.capacity", "parent=camera.ui, model='quad', origin=(-0.5, 0.5), texture=\"white_cube\", color=color.dark_gray ) self.capacity = capacity self.scale=(capacity/10, 0.1) self.position=(-capacity/20,", "ursina import * class Inventory(Entity): def __init__(self, capacity): super(Inventory, self).__init__( parent=camera.ui, model='quad', origin=(-0.5,", "-0.35) self.texture_scale= (capacity, 1) self.item_parent = Entity(parent=self, scale=(1/capacity, 1)) self.spots = [None for", "== input_handler.Keys.escape: application.quit() if __name__ == '__main__': app = Ursina() window.fullscreen = True", "origin=(-0.5, 0.5), texture=\"white_cube\", color=color.dark_gray ) self.capacity = capacity self.scale=(capacity/10, 0.1) self.position=(-capacity/20, -0.35) self.texture_scale=", "* class Inventory(Entity): def __init__(self, capacity): super(Inventory, self).__init__( parent=camera.ui, model='quad', origin=(-0.5, 0.5), texture=\"white_cube\",", "1) self.item_parent = Entity(parent=self, scale=(1/capacity, 1)) self.spots = [None for i in range(capacity)]", "spot = index if index>=0 else self.next_slot() if spot >= 0: item =", "Inventory(Entity): def __init__(self, capacity): super(Inventory, self).__init__( parent=camera.ui, 
model='quad', origin=(-0.5, 0.5), texture=\"white_cube\", color=color.dark_gray )", "-1 def append(self, index: int=-1): spot = index if index>=0 else self.next_slot() if", "self.spots = [None for i in range(capacity)] def next_slot(self) -> int: for i", "__name__ == '__main__': app = Ursina() window.fullscreen = True inventory = Inventory(8) inventory.append(0)", "0.5)) self.spots[spot] = item def input(key): if key == input_handler.Keys.escape: application.quit() if __name__", "else self.next_slot() if spot >= 0: item = Button(parent=self.item_parent, model = \"quad\", color=color.random_color(),", "model = \"quad\", color=color.random_color(), position=(spot, 0), origin=(-0.5, 0.5)) self.spots[spot] = item def input(key):", "color=color.random_color(), position=(spot, 0), origin=(-0.5, 0.5)) self.spots[spot] = item def input(key): if key ==", "[None for i in range(capacity)] def next_slot(self) -> int: for i in range(self.capacity):", "i in range(capacity)] def next_slot(self) -> int: for i in range(self.capacity): if self.spots[i]", "import * class Inventory(Entity): def __init__(self, capacity): super(Inventory, self).__init__( parent=camera.ui, model='quad', origin=(-0.5, 0.5),", "if spot >= 0: item = Button(parent=self.item_parent, model = \"quad\", color=color.random_color(), position=(spot, 0),", "in range(self.capacity): if self.spots[i] is None: return i return -1 def append(self, index:", "= Button(parent=self.item_parent, model = \"quad\", color=color.random_color(), position=(spot, 0), origin=(-0.5, 0.5)) self.spots[spot] = item", "int=-1): spot = index if index>=0 else self.next_slot() if spot >= 0: item", "__init__(self, capacity): super(Inventory, self).__init__( parent=camera.ui, model='quad', origin=(-0.5, 0.5), texture=\"white_cube\", color=color.dark_gray ) self.capacity =", "def next_slot(self) -> int: for i in range(self.capacity): if self.spots[i] is None: return", "input_handler.Keys.escape: application.quit() if __name__ == '__main__': app 
= Ursina() window.fullscreen = True inventory", "if self.spots[i] is None: return i return -1 def append(self, index: int=-1): spot", "return -1 def append(self, index: int=-1): spot = index if index>=0 else self.next_slot()", "i in range(self.capacity): if self.spots[i] is None: return i return -1 def append(self,", ") self.capacity = capacity self.scale=(capacity/10, 0.1) self.position=(-capacity/20, -0.35) self.texture_scale= (capacity, 1) self.item_parent =", "if __name__ == '__main__': app = Ursina() window.fullscreen = True inventory = Inventory(8)", "index: int=-1): spot = index if index>=0 else self.next_slot() if spot >= 0:", "scale=(1/capacity, 1)) self.spots = [None for i in range(capacity)] def next_slot(self) -> int:", "origin=(-0.5, 0.5)) self.spots[spot] = item def input(key): if key == input_handler.Keys.escape: application.quit() if", "if key == input_handler.Keys.escape: application.quit() if __name__ == '__main__': app = Ursina() window.fullscreen", "0.1) self.position=(-capacity/20, -0.35) self.texture_scale= (capacity, 1) self.item_parent = Entity(parent=self, scale=(1/capacity, 1)) self.spots =", "0), origin=(-0.5, 0.5)) self.spots[spot] = item def input(key): if key == input_handler.Keys.escape: application.quit()", "Entity(parent=self, scale=(1/capacity, 1)) self.spots = [None for i in range(capacity)] def next_slot(self) ->", "'__main__': app = Ursina() window.fullscreen = True inventory = Inventory(8) inventory.append(0) inventory.append(1) app.run()", "Button(parent=self.item_parent, model = \"quad\", color=color.random_color(), position=(spot, 0), origin=(-0.5, 0.5)) self.spots[spot] = item def", "0.5), texture=\"white_cube\", color=color.dark_gray ) self.capacity = capacity self.scale=(capacity/10, 0.1) self.position=(-capacity/20, -0.35) self.texture_scale= (capacity,", "append(self, index: int=-1): spot = index if index>=0 else self.next_slot() if spot >=", "== '__main__': app = Ursina() window.fullscreen = True inventory = 
Inventory(8) inventory.append(0) inventory.append(1)", "return i return -1 def append(self, index: int=-1): spot = index if index>=0", "range(capacity)] def next_slot(self) -> int: for i in range(self.capacity): if self.spots[i] is None:", "if index>=0 else self.next_slot() if spot >= 0: item = Button(parent=self.item_parent, model =", "key == input_handler.Keys.escape: application.quit() if __name__ == '__main__': app = Ursina() window.fullscreen =", "self.item_parent = Entity(parent=self, scale=(1/capacity, 1)) self.spots = [None for i in range(capacity)] def", ">= 0: item = Button(parent=self.item_parent, model = \"quad\", color=color.random_color(), position=(spot, 0), origin=(-0.5, 0.5))", "next_slot(self) -> int: for i in range(self.capacity): if self.spots[i] is None: return i", "= capacity self.scale=(capacity/10, 0.1) self.position=(-capacity/20, -0.35) self.texture_scale= (capacity, 1) self.item_parent = Entity(parent=self, scale=(1/capacity,", "self.position=(-capacity/20, -0.35) self.texture_scale= (capacity, 1) self.item_parent = Entity(parent=self, scale=(1/capacity, 1)) self.spots = [None", "= Entity(parent=self, scale=(1/capacity, 1)) self.spots = [None for i in range(capacity)] def next_slot(self)", "item def input(key): if key == input_handler.Keys.escape: application.quit() if __name__ == '__main__': app", "int: for i in range(self.capacity): if self.spots[i] is None: return i return -1", "i return -1 def append(self, index: int=-1): spot = index if index>=0 else", "for i in range(capacity)] def next_slot(self) -> int: for i in range(self.capacity): if", "application.quit() if __name__ == '__main__': app = Ursina() window.fullscreen = True inventory =", "range(self.capacity): if self.spots[i] is None: return i return -1 def append(self, index: int=-1):" ]
[ "in [ ([1, 1, 2, 2, 3, 3], 3), ([1, 1, 2, 3],", "[ ([1, 1, 2, 2, 3, 3], 3), ([1, 1, 2, 3], 2),", "3, 3], 3), ([1, 1, 2, 3], 2), ([6, 6, 6, 6], 1),", "def distributeCandies(self, candyType: List[int]) -> int: return min(len(candyType) // 2, len(set(candyType))) # TESTS", "List[int]) -> int: return min(len(candyType) // 2, len(set(candyType))) # TESTS for candyType, expected", "<reponame>l33tdaima/l33tdaima<gh_stars>1-10 from typing import List class Solution: def distributeCandies(self, candyType: List[int]) -> int:", "= Solution() actual = sol.distributeCandies(candyType) print(\"The maximum number of different types in\", candyType,", "actual = sol.distributeCandies(candyType) print(\"The maximum number of different types in\", candyType, \"->\", actual)", "2, 3, 3], 3), ([1, 1, 2, 3], 2), ([6, 6, 6, 6],", "class Solution: def distributeCandies(self, candyType: List[int]) -> int: return min(len(candyType) // 2, len(set(candyType)))", "([1, 1, 2, 2, 3, 3], 3), ([1, 1, 2, 3], 2), ([6,", "2), ([6, 6, 6, 6], 1), ]: sol = Solution() actual = sol.distributeCandies(candyType)", "]: sol = Solution() actual = sol.distributeCandies(candyType) print(\"The maximum number of different types", "distributeCandies(self, candyType: List[int]) -> int: return min(len(candyType) // 2, len(set(candyType))) # TESTS for", "maximum number of different types in\", candyType, \"->\", actual) assert actual == expected", "Solution: def distributeCandies(self, candyType: List[int]) -> int: return min(len(candyType) // 2, len(set(candyType))) #", "3), ([1, 1, 2, 3], 2), ([6, 6, 6, 6], 1), ]: sol", "1, 2, 3], 2), ([6, 6, 6, 6], 1), ]: sol = Solution()", "1, 2, 2, 3, 3], 3), ([1, 1, 2, 3], 2), ([6, 6,", "len(set(candyType))) # TESTS for candyType, expected in [ ([1, 1, 2, 2, 3,", "expected in [ ([1, 1, 2, 2, 3, 3], 3), ([1, 1, 2,", "Solution() actual = sol.distributeCandies(candyType) print(\"The maximum number of different types in\", candyType, \"->\",", "import List class Solution: def 
distributeCandies(self, candyType: List[int]) -> int: return min(len(candyType) //", "typing import List class Solution: def distributeCandies(self, candyType: List[int]) -> int: return min(len(candyType)", "min(len(candyType) // 2, len(set(candyType))) # TESTS for candyType, expected in [ ([1, 1,", "1), ]: sol = Solution() actual = sol.distributeCandies(candyType) print(\"The maximum number of different", "([6, 6, 6, 6], 1), ]: sol = Solution() actual = sol.distributeCandies(candyType) print(\"The", "TESTS for candyType, expected in [ ([1, 1, 2, 2, 3, 3], 3),", "3], 3), ([1, 1, 2, 3], 2), ([6, 6, 6, 6], 1), ]:", "# TESTS for candyType, expected in [ ([1, 1, 2, 2, 3, 3],", "-> int: return min(len(candyType) // 2, len(set(candyType))) # TESTS for candyType, expected in", "2, 2, 3, 3], 3), ([1, 1, 2, 3], 2), ([6, 6, 6,", "3], 2), ([6, 6, 6, 6], 1), ]: sol = Solution() actual =", "candyType, expected in [ ([1, 1, 2, 2, 3, 3], 3), ([1, 1,", "2, 3], 2), ([6, 6, 6, 6], 1), ]: sol = Solution() actual", "sol = Solution() actual = sol.distributeCandies(candyType) print(\"The maximum number of different types in\",", "print(\"The maximum number of different types in\", candyType, \"->\", actual) assert actual ==", "= sol.distributeCandies(candyType) print(\"The maximum number of different types in\", candyType, \"->\", actual) assert", "int: return min(len(candyType) // 2, len(set(candyType))) # TESTS for candyType, expected in [", "6, 6], 1), ]: sol = Solution() actual = sol.distributeCandies(candyType) print(\"The maximum number", "// 2, len(set(candyType))) # TESTS for candyType, expected in [ ([1, 1, 2,", "from typing import List class Solution: def distributeCandies(self, candyType: List[int]) -> int: return", "([1, 1, 2, 3], 2), ([6, 6, 6, 6], 1), ]: sol =", "6, 6, 6], 1), ]: sol = Solution() actual = sol.distributeCandies(candyType) print(\"The maximum", "6], 1), ]: sol = Solution() actual = sol.distributeCandies(candyType) print(\"The maximum number of", 
"sol.distributeCandies(candyType) print(\"The maximum number of different types in\", candyType, \"->\", actual) assert actual", "2, len(set(candyType))) # TESTS for candyType, expected in [ ([1, 1, 2, 2,", "List class Solution: def distributeCandies(self, candyType: List[int]) -> int: return min(len(candyType) // 2,", "candyType: List[int]) -> int: return min(len(candyType) // 2, len(set(candyType))) # TESTS for candyType,", "for candyType, expected in [ ([1, 1, 2, 2, 3, 3], 3), ([1,", "return min(len(candyType) // 2, len(set(candyType))) # TESTS for candyType, expected in [ ([1," ]
[ "models.MplsModel() controller = controllers.MplsController(self._window, model) view = views.MplsView(self._window, controller) # Ponemos el tirulo", "la primera practica \"\"\" model = models.RtpModel() controller = controllers.RtpController(self._window, model) view =", "ventana. pasamos a crear todos los componentes para la primera practica \"\"\" model", "de los paquetes MPLS y mostrarlos en pantalla \"\"\" model = models.MplsModel() controller", "la vista de la ventana. Creamos todos los componentes necesarios para realizar la", "Creamos todos los componentes necesarios para realizar la captura de los paquetes MPLS", "programa. \"\"\" from src.controller.Controller import Controller from src.model import models from src.controller import", "MPLS y mostrarlos en pantalla \"\"\" model = models.MplsModel() controller = controllers.MplsController(self._window, model)", "event): # FIXME: implementar esta función para regresar al menu anterior pass def", "class MainController(Controller): def back(self, event): # FIXME: implementar esta función para regresar al", "views.MplsView(self._window, controller) # Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector cabecera MPLS\")", "models from src.controller import controllers from src.view_app import views class MainController(Controller): def back(self,", "los componentes para la primera practica \"\"\" model = models.RtpModel() controller = controllers.RtpController(self._window,", "mostrarlos en pantalla \"\"\" model = models.MplsModel() controller = controllers.MplsController(self._window, model) view =", "crear todos los componentes para la primera practica \"\"\" model = models.RtpModel() controller", "UTF-8 -*- \"\"\"Lista de controladores del programa. 
En este fichero podemos encontrarnos todos", "self._window.set_title(\"Lector cabecera MPLS\") controller.set_view(view) view.init_view() def captura_rtp(self, event): \"\"\"Cambia la vista de la", "implementar esta función para regresar al menu anterior pass def open_pcapng(self, event): pass", "primera practica \"\"\" model = models.RtpModel() controller = controllers.RtpController(self._window, model) view = views.RtpView(self._window,", "vistas de nuestro programa. \"\"\" from src.controller.Controller import Controller from src.model import models", "views class MainController(Controller): def back(self, event): # FIXME: implementar esta función para regresar", "paquetes MPLS y mostrarlos en pantalla \"\"\" model = models.MplsModel() controller = controllers.MplsController(self._window,", "regresar al menu anterior pass def open_pcapng(self, event): pass def captura_mpls(self, event): \"\"\"Cambia", "controllers from src.view_app import views class MainController(Controller): def back(self, event): # FIXME: implementar", "anterior pass def open_pcapng(self, event): pass def captura_mpls(self, event): \"\"\"Cambia la vista de", "models.RtpModel() controller = controllers.RtpController(self._window, model) view = views.RtpView(self._window, controller) # Ponemos el tirulo", "import Controller from src.model import models from src.controller import controllers from src.view_app import", "captura_rtp(self, event): \"\"\"Cambia la vista de la ventana. pasamos a crear todos los", "podemos encontrarnos todos los controladores, de todas las vistas de nuestro programa. \"\"\"", "la ventana. pasamos a crear todos los componentes para la primera practica \"\"\"", "y mostrarlos en pantalla \"\"\" model = models.MplsModel() controller = controllers.MplsController(self._window, model) view", "para realizar la captura de los paquetes MPLS y mostrarlos en pantalla \"\"\"", "nuestro programa. 
\"\"\" from src.controller.Controller import Controller from src.model import models from src.controller", "src.model import models from src.controller import controllers from src.view_app import views class MainController(Controller):", "\"\"\" model = models.RtpModel() controller = controllers.RtpController(self._window, model) view = views.RtpView(self._window, controller) #", "fichero podemos encontrarnos todos los controladores, de todas las vistas de nuestro programa.", "a crear todos los componentes para la primera practica \"\"\" model = models.RtpModel()", "la vista de la ventana. pasamos a crear todos los componentes para la", "de la ventana. pasamos a crear todos los componentes para la primera practica", "ventana. Creamos todos los componentes necesarios para realizar la captura de los paquetes", "tirulo a la nueva vista self._window.set_title(\"Lector cabecera MPLS\") controller.set_view(view) view.init_view() def captura_rtp(self, event):", "En este fichero podemos encontrarnos todos los controladores, de todas las vistas de", "#!/usr/bin/python # -*- coding: UTF-8 -*- \"\"\"Lista de controladores del programa. En este", "import controllers from src.view_app import views class MainController(Controller): def back(self, event): # FIXME:", "necesarios para realizar la captura de los paquetes MPLS y mostrarlos en pantalla", "# FIXME: implementar esta función para regresar al menu anterior pass def open_pcapng(self,", "vista self._window.set_title(\"Lector cabecera MPLS\") controller.set_view(view) view.init_view() def captura_rtp(self, event): \"\"\"Cambia la vista de", "de todas las vistas de nuestro programa. 
\"\"\" from src.controller.Controller import Controller from", "view = views.MplsView(self._window, controller) # Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector", "model) view = views.RtpView(self._window, controller) # Ponemos el tirulo a la nueva vista", "practica \"\"\" model = models.RtpModel() controller = controllers.RtpController(self._window, model) view = views.RtpView(self._window, controller)", "def captura_rtp(self, event): \"\"\"Cambia la vista de la ventana. pasamos a crear todos", "programa. En este fichero podemos encontrarnos todos los controladores, de todas las vistas", "\"\"\"Lista de controladores del programa. En este fichero podemos encontrarnos todos los controladores,", "\"\"\" model = models.MplsModel() controller = controllers.MplsController(self._window, model) view = views.MplsView(self._window, controller) #", "el tirulo a la nueva vista self._window.set_title(\"Lector cabecera MPLS\") controller.set_view(view) view.init_view() def captura_rtp(self,", "pass def captura_mpls(self, event): \"\"\"Cambia la vista de la ventana. Creamos todos los", "import models from src.controller import controllers from src.view_app import views class MainController(Controller): def", "<gh_stars>1-10 #!/usr/bin/python # -*- coding: UTF-8 -*- \"\"\"Lista de controladores del programa. En", "los controladores, de todas las vistas de nuestro programa. \"\"\" from src.controller.Controller import", "captura_mpls(self, event): \"\"\"Cambia la vista de la ventana. Creamos todos los componentes necesarios", "los paquetes MPLS y mostrarlos en pantalla \"\"\" model = models.MplsModel() controller =", "event): pass def captura_mpls(self, event): \"\"\"Cambia la vista de la ventana. 
Creamos todos", "def back(self, event): # FIXME: implementar esta función para regresar al menu anterior", "from src.model import models from src.controller import controllers from src.view_app import views class", "= controllers.RtpController(self._window, model) view = views.RtpView(self._window, controller) # Ponemos el tirulo a la", "del programa. En este fichero podemos encontrarnos todos los controladores, de todas las", "para la primera practica \"\"\" model = models.RtpModel() controller = controllers.RtpController(self._window, model) view", "from src.view_app import views class MainController(Controller): def back(self, event): # FIXME: implementar esta", "todos los componentes para la primera practica \"\"\" model = models.RtpModel() controller =", "from src.controller import controllers from src.view_app import views class MainController(Controller): def back(self, event):", "view = views.RtpView(self._window, controller) # Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector", "componentes necesarios para realizar la captura de los paquetes MPLS y mostrarlos en", "este fichero podemos encontrarnos todos los controladores, de todas las vistas de nuestro", "src.controller import controllers from src.view_app import views class MainController(Controller): def back(self, event): #", "realizar la captura de los paquetes MPLS y mostrarlos en pantalla \"\"\" model", "def open_pcapng(self, event): pass def captura_mpls(self, event): \"\"\"Cambia la vista de la ventana.", "a la nueva vista self._window.set_title(\"Lector cabecera MPLS\") controller.set_view(view) view.init_view() def captura_rtp(self, event): \"\"\"Cambia", "# Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector cabecera MPLS\") controller.set_view(view) view.init_view()", "de nuestro programa. 
\"\"\" from src.controller.Controller import Controller from src.model import models from", "views.RtpView(self._window, controller) # Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector de RTP\")", "nueva vista self._window.set_title(\"Lector cabecera MPLS\") controller.set_view(view) view.init_view() def captura_rtp(self, event): \"\"\"Cambia la vista", "MPLS\") controller.set_view(view) view.init_view() def captura_rtp(self, event): \"\"\"Cambia la vista de la ventana. pasamos", "vista de la ventana. pasamos a crear todos los componentes para la primera", "cabecera MPLS\") controller.set_view(view) view.init_view() def captura_rtp(self, event): \"\"\"Cambia la vista de la ventana.", "view.init_view() def captura_rtp(self, event): \"\"\"Cambia la vista de la ventana. pasamos a crear", "la nueva vista self._window.set_title(\"Lector cabecera MPLS\") controller.set_view(view) view.init_view() def captura_rtp(self, event): \"\"\"Cambia la", "vista de la ventana. Creamos todos los componentes necesarios para realizar la captura", "componentes para la primera practica \"\"\" model = models.RtpModel() controller = controllers.RtpController(self._window, model)", "captura de los paquetes MPLS y mostrarlos en pantalla \"\"\" model = models.MplsModel()", "todas las vistas de nuestro programa. \"\"\" from src.controller.Controller import Controller from src.model", "controladores, de todas las vistas de nuestro programa. \"\"\" from src.controller.Controller import Controller", "src.view_app import views class MainController(Controller): def back(self, event): # FIXME: implementar esta función", "controller.set_view(view) view.init_view() def captura_rtp(self, event): \"\"\"Cambia la vista de la ventana. 
pasamos a", "al menu anterior pass def open_pcapng(self, event): pass def captura_mpls(self, event): \"\"\"Cambia la", "= views.MplsView(self._window, controller) # Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector cabecera", "todos los componentes necesarios para realizar la captura de los paquetes MPLS y", "controladores del programa. En este fichero podemos encontrarnos todos los controladores, de todas", "la captura de los paquetes MPLS y mostrarlos en pantalla \"\"\" model =", "controller = controllers.RtpController(self._window, model) view = views.RtpView(self._window, controller) # Ponemos el tirulo a", "de la ventana. Creamos todos los componentes necesarios para realizar la captura de", "encontrarnos todos los controladores, de todas las vistas de nuestro programa. \"\"\" from", "la ventana. Creamos todos los componentes necesarios para realizar la captura de los", "para regresar al menu anterior pass def open_pcapng(self, event): pass def captura_mpls(self, event):", "función para regresar al menu anterior pass def open_pcapng(self, event): pass def captura_mpls(self,", "controller) # Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector de RTP\") controller.set_view(view)", "event): \"\"\"Cambia la vista de la ventana. Creamos todos los componentes necesarios para", "back(self, event): # FIXME: implementar esta función para regresar al menu anterior pass", "-*- \"\"\"Lista de controladores del programa. En este fichero podemos encontrarnos todos los", "FIXME: implementar esta función para regresar al menu anterior pass def open_pcapng(self, event):", "controller = controllers.MplsController(self._window, model) view = views.MplsView(self._window, controller) # Ponemos el tirulo a", "= models.RtpModel() controller = controllers.RtpController(self._window, model) view = views.RtpView(self._window, controller) # Ponemos el", "\"\"\"Cambia la vista de la ventana. 
pasamos a crear todos los componentes para", "-*- coding: UTF-8 -*- \"\"\"Lista de controladores del programa. En este fichero podemos", "event): \"\"\"Cambia la vista de la ventana. pasamos a crear todos los componentes", "MainController(Controller): def back(self, event): # FIXME: implementar esta función para regresar al menu", "= views.RtpView(self._window, controller) # Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector de", "\"\"\"Cambia la vista de la ventana. Creamos todos los componentes necesarios para realizar", "en pantalla \"\"\" model = models.MplsModel() controller = controllers.MplsController(self._window, model) view = views.MplsView(self._window,", "controllers.RtpController(self._window, model) view = views.RtpView(self._window, controller) # Ponemos el tirulo a la nueva", "coding: UTF-8 -*- \"\"\"Lista de controladores del programa. En este fichero podemos encontrarnos", "def captura_mpls(self, event): \"\"\"Cambia la vista de la ventana. Creamos todos los componentes", "# Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector de RTP\") controller.set_view(view) view.init_view()", "# -*- coding: UTF-8 -*- \"\"\"Lista de controladores del programa. 
En este fichero", "model = models.MplsModel() controller = controllers.MplsController(self._window, model) view = views.MplsView(self._window, controller) # Ponemos", "import views class MainController(Controller): def back(self, event): # FIXME: implementar esta función para", "los componentes necesarios para realizar la captura de los paquetes MPLS y mostrarlos", "Controller from src.model import models from src.controller import controllers from src.view_app import views", "Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector cabecera MPLS\") controller.set_view(view) view.init_view() def", "= models.MplsModel() controller = controllers.MplsController(self._window, model) view = views.MplsView(self._window, controller) # Ponemos el", "model) view = views.MplsView(self._window, controller) # Ponemos el tirulo a la nueva vista", "esta función para regresar al menu anterior pass def open_pcapng(self, event): pass def", "pasamos a crear todos los componentes para la primera practica \"\"\" model =", "open_pcapng(self, event): pass def captura_mpls(self, event): \"\"\"Cambia la vista de la ventana. 
Creamos", "pass def open_pcapng(self, event): pass def captura_mpls(self, event): \"\"\"Cambia la vista de la", "controller) # Ponemos el tirulo a la nueva vista self._window.set_title(\"Lector cabecera MPLS\") controller.set_view(view)", "menu anterior pass def open_pcapng(self, event): pass def captura_mpls(self, event): \"\"\"Cambia la vista", "pantalla \"\"\" model = models.MplsModel() controller = controllers.MplsController(self._window, model) view = views.MplsView(self._window, controller)", "\"\"\" from src.controller.Controller import Controller from src.model import models from src.controller import controllers", "src.controller.Controller import Controller from src.model import models from src.controller import controllers from src.view_app", "controllers.MplsController(self._window, model) view = views.MplsView(self._window, controller) # Ponemos el tirulo a la nueva", "= controllers.MplsController(self._window, model) view = views.MplsView(self._window, controller) # Ponemos el tirulo a la", "from src.controller.Controller import Controller from src.model import models from src.controller import controllers from", "todos los controladores, de todas las vistas de nuestro programa. \"\"\" from src.controller.Controller", "las vistas de nuestro programa. \"\"\" from src.controller.Controller import Controller from src.model import", "de controladores del programa. En este fichero podemos encontrarnos todos los controladores, de", "model = models.RtpModel() controller = controllers.RtpController(self._window, model) view = views.RtpView(self._window, controller) # Ponemos" ]
[ "ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner from ai_traineree.tasks import PettingZooTask env = waterworld_v3.env() task = PettingZooTask(env=env)", "= task.observation_spaces[actor_name] action_space = task.action_spaces[actor_name] agents.append(PPOAgent(obs_space, action_space)) multi_agent = IndependentAgents(agents, agent_names=task.agents) runner =", "that. \"\"\" from pettingzoo.sisl import waterworld_v3 from ai_traineree.agents.ppo import PPOAgent from ai_traineree.multi_agents.independent import", "waterworld_v3.env() task = PettingZooTask(env=env) task.reset() # Needs to be reset to access env.agents()", "= task.action_spaces[actor_name] agents.append(PPOAgent(obs_space, action_space)) multi_agent = IndependentAgents(agents, agent_names=task.agents) runner = MultiAgentCycleEnvRunner(task, multi_agent=multi_agent) runner.run(max_episodes=3)", "yet. Well, maybe they do but it might take a long time to", "check. Ain't nobody got time for that. \"\"\" from pettingzoo.sisl import waterworld_v3 from", "Needs to be reset to access env.agents() agents = [] for actor_name in", "from ai_traineree.tasks import PettingZooTask env = waterworld_v3.env() task = PettingZooTask(env=env) task.reset() # Needs", "but it might take a long time to check. Ain't nobody got time", "take a long time to check. Ain't nobody got time for that. \"\"\"", "from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner from ai_traineree.tasks import PettingZooTask env = waterworld_v3.env() task =", "= [] for actor_name in task.agents: obs_space = task.observation_spaces[actor_name] action_space = task.action_spaces[actor_name] agents.append(PPOAgent(obs_space,", "time to check. Ain't nobody got time for that. 
\"\"\" from pettingzoo.sisl import", "to be reset to access env.agents() agents = [] for actor_name in task.agents:", "agents = [] for actor_name in task.agents: obs_space = task.observation_spaces[actor_name] action_space = task.action_spaces[actor_name]", "anything useful, yet. Well, maybe they do but it might take a long", "No agent learns here anything useful, yet. Well, maybe they do but it", "from ai_traineree.agents.ppo import PPOAgent from ai_traineree.multi_agents.independent import IndependentAgents from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner from", "reset to access env.agents() agents = [] for actor_name in task.agents: obs_space =", "env = waterworld_v3.env() task = PettingZooTask(env=env) task.reset() # Needs to be reset to", "learns here anything useful, yet. Well, maybe they do but it might take", "import MultiAgentCycleEnvRunner from ai_traineree.tasks import PettingZooTask env = waterworld_v3.env() task = PettingZooTask(env=env) task.reset()", "time for that. \"\"\" from pettingzoo.sisl import waterworld_v3 from ai_traineree.agents.ppo import PPOAgent from", "import PPOAgent from ai_traineree.multi_agents.independent import IndependentAgents from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner from ai_traineree.tasks import", "to access env.agents() agents = [] for actor_name in task.agents: obs_space = task.observation_spaces[actor_name]", "for that. \"\"\" from pettingzoo.sisl import waterworld_v3 from ai_traineree.agents.ppo import PPOAgent from ai_traineree.multi_agents.independent", "import IndependentAgents from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner from ai_traineree.tasks import PettingZooTask env = waterworld_v3.env()", "it might take a long time to check. 
Ain't nobody got time for", "obs_space = task.observation_spaces[actor_name] action_space = task.action_spaces[actor_name] agents.append(PPOAgent(obs_space, action_space)) multi_agent = IndependentAgents(agents, agent_names=task.agents) runner", "[] for actor_name in task.agents: obs_space = task.observation_spaces[actor_name] action_space = task.action_spaces[actor_name] agents.append(PPOAgent(obs_space, action_space))", "action_space = task.action_spaces[actor_name] agents.append(PPOAgent(obs_space, action_space)) multi_agent = IndependentAgents(agents, agent_names=task.agents) runner = MultiAgentCycleEnvRunner(task, multi_agent=multi_agent)", "env.agents() agents = [] for actor_name in task.agents: obs_space = task.observation_spaces[actor_name] action_space =", "maybe they do but it might take a long time to check. Ain't", "actor_name in task.agents: obs_space = task.observation_spaces[actor_name] action_space = task.action_spaces[actor_name] agents.append(PPOAgent(obs_space, action_space)) multi_agent =", "PettingZooTask env = waterworld_v3.env() task = PettingZooTask(env=env) task.reset() # Needs to be reset", "PettingZooTask(env=env) task.reset() # Needs to be reset to access env.agents() agents = []", "access env.agents() agents = [] for actor_name in task.agents: obs_space = task.observation_spaces[actor_name] action_space", "task.observation_spaces[actor_name] action_space = task.action_spaces[actor_name] agents.append(PPOAgent(obs_space, action_space)) multi_agent = IndependentAgents(agents, agent_names=task.agents) runner = MultiAgentCycleEnvRunner(task,", "ai_traineree.multi_agents.independent import IndependentAgents from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner from ai_traineree.tasks import PettingZooTask env =", "Well, maybe they do but it might take a long time to check.", "\"\"\" from pettingzoo.sisl import waterworld_v3 from ai_traineree.agents.ppo import PPOAgent from ai_traineree.multi_agents.independent 
import IndependentAgents", "only. No agent learns here anything useful, yet. Well, maybe they do but", "is for demonstartion purpose only. No agent learns here anything useful, yet. Well,", "Ain't nobody got time for that. \"\"\" from pettingzoo.sisl import waterworld_v3 from ai_traineree.agents.ppo", "demonstartion purpose only. No agent learns here anything useful, yet. Well, maybe they", "task = PettingZooTask(env=env) task.reset() # Needs to be reset to access env.agents() agents", "task.agents: obs_space = task.observation_spaces[actor_name] action_space = task.action_spaces[actor_name] agents.append(PPOAgent(obs_space, action_space)) multi_agent = IndependentAgents(agents, agent_names=task.agents)", "might take a long time to check. Ain't nobody got time for that.", "in task.agents: obs_space = task.observation_spaces[actor_name] action_space = task.action_spaces[actor_name] agents.append(PPOAgent(obs_space, action_space)) multi_agent = IndependentAgents(agents,", "for demonstartion purpose only. No agent learns here anything useful, yet. Well, maybe", "nobody got time for that. \"\"\" from pettingzoo.sisl import waterworld_v3 from ai_traineree.agents.ppo import", "ai_traineree.agents.ppo import PPOAgent from ai_traineree.multi_agents.independent import IndependentAgents from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner from ai_traineree.tasks", "= PettingZooTask(env=env) task.reset() # Needs to be reset to access env.agents() agents =", "to check. Ain't nobody got time for that. 
\"\"\" from pettingzoo.sisl import waterworld_v3", "waterworld_v3 from ai_traineree.agents.ppo import PPOAgent from ai_traineree.multi_agents.independent import IndependentAgents from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner", "be reset to access env.agents() agents = [] for actor_name in task.agents: obs_space", "IndependentAgents from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner from ai_traineree.tasks import PettingZooTask env = waterworld_v3.env() task", "do but it might take a long time to check. Ain't nobody got", "from ai_traineree.multi_agents.independent import IndependentAgents from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner from ai_traineree.tasks import PettingZooTask env", "they do but it might take a long time to check. Ain't nobody", "agent learns here anything useful, yet. Well, maybe they do but it might", "MultiAgentCycleEnvRunner from ai_traineree.tasks import PettingZooTask env = waterworld_v3.env() task = PettingZooTask(env=env) task.reset() #", "\"\"\" This example is for demonstartion purpose only. No agent learns here anything", "import PettingZooTask env = waterworld_v3.env() task = PettingZooTask(env=env) task.reset() # Needs to be", "for actor_name in task.agents: obs_space = task.observation_spaces[actor_name] action_space = task.action_spaces[actor_name] agents.append(PPOAgent(obs_space, action_space)) multi_agent", "a long time to check. Ain't nobody got time for that. \"\"\" from", "PPOAgent from ai_traineree.multi_agents.independent import IndependentAgents from ai_traineree.runners.multiagent_env_runner import MultiAgentCycleEnvRunner from ai_traineree.tasks import PettingZooTask", "here anything useful, yet. 
Well, maybe they do but it might take a", "= waterworld_v3.env() task = PettingZooTask(env=env) task.reset() # Needs to be reset to access", "from pettingzoo.sisl import waterworld_v3 from ai_traineree.agents.ppo import PPOAgent from ai_traineree.multi_agents.independent import IndependentAgents from", "got time for that. \"\"\" from pettingzoo.sisl import waterworld_v3 from ai_traineree.agents.ppo import PPOAgent", "purpose only. No agent learns here anything useful, yet. Well, maybe they do", "useful, yet. Well, maybe they do but it might take a long time", "task.reset() # Needs to be reset to access env.agents() agents = [] for", "example is for demonstartion purpose only. No agent learns here anything useful, yet.", "ai_traineree.tasks import PettingZooTask env = waterworld_v3.env() task = PettingZooTask(env=env) task.reset() # Needs to", "This example is for demonstartion purpose only. No agent learns here anything useful,", "pettingzoo.sisl import waterworld_v3 from ai_traineree.agents.ppo import PPOAgent from ai_traineree.multi_agents.independent import IndependentAgents from ai_traineree.runners.multiagent_env_runner", "import waterworld_v3 from ai_traineree.agents.ppo import PPOAgent from ai_traineree.multi_agents.independent import IndependentAgents from ai_traineree.runners.multiagent_env_runner import", "# Needs to be reset to access env.agents() agents = [] for actor_name", "long time to check. Ain't nobody got time for that. \"\"\" from pettingzoo.sisl" ]
[ "= n + 100 print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) while True:", "from empty stack') def main(): queue = Queue() n = 100 print('Empty queue:", "def main(): queue = Queue() n = 100 print('Empty queue: {0}'.format(queue.is_empty())) while queue.count()", "while queue.count() < 5: print('pushing elements: {0}'.format(n)) queue.enqueue(n) n = n + 100", "e: print('Exception: {0}'.format(e)) break print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) if __name__", "count(self): return len(self._list) def is_empty(self): return self.count() == 0 def enqueue(self, item): self._list.append(item)", "except Exception as e: print('Exception: {0}'.format(e)) break print('Number of items: {0}'.format(queue.count())) print('Empty queue:", "self.count() == 0 def enqueue(self, item): self._list.append(item) def dequeue(self): try: return self._list.pop(0) except", "n = 100 print('Empty queue: {0}'.format(queue.is_empty())) while queue.count() < 5: print('pushing elements: {0}'.format(n))", "class Queue(object): def __init__(self): self._list = [] def count(self): return len(self._list) def is_empty(self):", "{0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) while True: try: print('Removing element: {0}'.format(queue.dequeue())) except Exception as", "100 print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) while True: try: print('Removing element:", "== 0 def enqueue(self, item): self._list.append(item) def dequeue(self): try: return self._list.pop(0) except IndexError:", "{0}'.format(queue.is_empty())) while True: try: print('Removing element: {0}'.format(queue.dequeue())) except Exception as e: print('Exception: {0}'.format(e))", "queue.count() < 5: print('pushing elements: {0}'.format(n)) queue.enqueue(n) n = n + 100 print('Number", "self._list = [] def count(self): return len(self._list) 
def is_empty(self): return self.count() == 0", "{0}'.format(queue.is_empty())) while queue.count() < 5: print('pushing elements: {0}'.format(n)) queue.enqueue(n) n = n +", "{0}'.format(n)) queue.enqueue(n) n = n + 100 print('Number of items: {0}'.format(queue.count())) print('Empty queue:", "print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) while True: try: print('Removing element: {0}'.format(queue.dequeue()))", "print('Removing element: {0}'.format(queue.dequeue())) except Exception as e: print('Exception: {0}'.format(e)) break print('Number of items:", "= 100 print('Empty queue: {0}'.format(queue.is_empty())) while queue.count() < 5: print('pushing elements: {0}'.format(n)) queue.enqueue(n)", "True: try: print('Removing element: {0}'.format(queue.dequeue())) except Exception as e: print('Exception: {0}'.format(e)) break print('Number", "def enqueue(self, item): self._list.append(item) def dequeue(self): try: return self._list.pop(0) except IndexError: raise IndexError('pop", "n + 100 print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) while True: try:", "{0}'.format(e)) break print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) if __name__ == '__main__':", "[] def count(self): return len(self._list) def is_empty(self): return self.count() == 0 def enqueue(self,", "empty stack') def main(): queue = Queue() n = 100 print('Empty queue: {0}'.format(queue.is_empty()))", "is_empty(self): return self.count() == 0 def enqueue(self, item): self._list.append(item) def dequeue(self): try: return", "def count(self): return len(self._list) def is_empty(self): return self.count() == 0 def enqueue(self, item):", "try: return self._list.pop(0) except IndexError: raise IndexError('pop from empty stack') def main(): queue", "__init__(self): self._list = [] def count(self): return len(self._list) def is_empty(self): return 
self.count() ==", "dequeue(self): try: return self._list.pop(0) except IndexError: raise IndexError('pop from empty stack') def main():", "def __init__(self): self._list = [] def count(self): return len(self._list) def is_empty(self): return self.count()", "def is_empty(self): return self.count() == 0 def enqueue(self, item): self._list.append(item) def dequeue(self): try:", "self._list.append(item) def dequeue(self): try: return self._list.pop(0) except IndexError: raise IndexError('pop from empty stack')", "except IndexError: raise IndexError('pop from empty stack') def main(): queue = Queue() n", "break print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) if __name__ == '__main__': main()", "return len(self._list) def is_empty(self): return self.count() == 0 def enqueue(self, item): self._list.append(item) def", "return self.count() == 0 def enqueue(self, item): self._list.append(item) def dequeue(self): try: return self._list.pop(0)", "print('Empty queue: {0}'.format(queue.is_empty())) while True: try: print('Removing element: {0}'.format(queue.dequeue())) except Exception as e:", "queue: {0}'.format(queue.is_empty())) while True: try: print('Removing element: {0}'.format(queue.dequeue())) except Exception as e: print('Exception:", "print('pushing elements: {0}'.format(n)) queue.enqueue(n) n = n + 100 print('Number of items: {0}'.format(queue.count()))", "stack') def main(): queue = Queue() n = 100 print('Empty queue: {0}'.format(queue.is_empty())) while", "element: {0}'.format(queue.dequeue())) except Exception as e: print('Exception: {0}'.format(e)) break print('Number of items: {0}'.format(queue.count()))", "of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) while True: try: print('Removing element: {0}'.format(queue.dequeue())) except", "n = n + 100 print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) while", "Queue() n = 
100 print('Empty queue: {0}'.format(queue.is_empty())) while queue.count() < 5: print('pushing elements:", "100 print('Empty queue: {0}'.format(queue.is_empty())) while queue.count() < 5: print('pushing elements: {0}'.format(n)) queue.enqueue(n) n", "= [] def count(self): return len(self._list) def is_empty(self): return self.count() == 0 def", "return self._list.pop(0) except IndexError: raise IndexError('pop from empty stack') def main(): queue =", "elements: {0}'.format(n)) queue.enqueue(n) n = n + 100 print('Number of items: {0}'.format(queue.count())) print('Empty", "as e: print('Exception: {0}'.format(e)) break print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) if", "queue.enqueue(n) n = n + 100 print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty()))", "0 def enqueue(self, item): self._list.append(item) def dequeue(self): try: return self._list.pop(0) except IndexError: raise", "IndexError: raise IndexError('pop from empty stack') def main(): queue = Queue() n =", "IndexError('pop from empty stack') def main(): queue = Queue() n = 100 print('Empty", "Exception as e: print('Exception: {0}'.format(e)) break print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty()))", "self._list.pop(0) except IndexError: raise IndexError('pop from empty stack') def main(): queue = Queue()", "= Queue() n = 100 print('Empty queue: {0}'.format(queue.is_empty())) while queue.count() < 5: print('pushing", "items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) while True: try: print('Removing element: {0}'.format(queue.dequeue())) except Exception", "item): self._list.append(item) def dequeue(self): try: return self._list.pop(0) except IndexError: raise IndexError('pop from empty", "def dequeue(self): try: return self._list.pop(0) except IndexError: raise IndexError('pop from empty stack') def", "try: 
print('Removing element: {0}'.format(queue.dequeue())) except Exception as e: print('Exception: {0}'.format(e)) break print('Number of", "5: print('pushing elements: {0}'.format(n)) queue.enqueue(n) n = n + 100 print('Number of items:", "{0}'.format(queue.dequeue())) except Exception as e: print('Exception: {0}'.format(e)) break print('Number of items: {0}'.format(queue.count())) print('Empty", "enqueue(self, item): self._list.append(item) def dequeue(self): try: return self._list.pop(0) except IndexError: raise IndexError('pop from", "while True: try: print('Removing element: {0}'.format(queue.dequeue())) except Exception as e: print('Exception: {0}'.format(e)) break", "< 5: print('pushing elements: {0}'.format(n)) queue.enqueue(n) n = n + 100 print('Number of", "queue: {0}'.format(queue.is_empty())) while queue.count() < 5: print('pushing elements: {0}'.format(n)) queue.enqueue(n) n = n", "Queue(object): def __init__(self): self._list = [] def count(self): return len(self._list) def is_empty(self): return", "+ 100 print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) while True: try: print('Removing", "len(self._list) def is_empty(self): return self.count() == 0 def enqueue(self, item): self._list.append(item) def dequeue(self):", "queue = Queue() n = 100 print('Empty queue: {0}'.format(queue.is_empty())) while queue.count() < 5:", "main(): queue = Queue() n = 100 print('Empty queue: {0}'.format(queue.is_empty())) while queue.count() <", "print('Empty queue: {0}'.format(queue.is_empty())) while queue.count() < 5: print('pushing elements: {0}'.format(n)) queue.enqueue(n) n =", "raise IndexError('pop from empty stack') def main(): queue = Queue() n = 100", "print('Exception: {0}'.format(e)) break print('Number of items: {0}'.format(queue.count())) print('Empty queue: {0}'.format(queue.is_empty())) if __name__ ==" ]
[ "= 0 for l in t: if l in t_map: t_map[l] += 1", "str) -> bool: if len(s) == len(t): s_map = {} t_map = {}", "s_map[l] = 0 for l in t: if l in t_map: t_map[l] +=", "def isAnagram(s: str, t: str) -> bool: if len(s) == len(t): s_map =", "<gh_stars>0 def isAnagram(s: str, t: str) -> bool: if len(s) == len(t): s_map", "0 for l in t: if l in t_map: t_map[l] += 1 else:", "t_map = {} for l in s: if l in s_map: s_map[l] +=", "= 0 for l in s_map: if l not in t_map or s_map[l]", "t: if l in t_map: t_map[l] += 1 else: t_map[l] = 0 for", "in t: if l in t_map: t_map[l] += 1 else: t_map[l] = 0", "0 for l in s_map: if l not in t_map or s_map[l] !=", "+= 1 else: s_map[l] = 0 for l in t: if l in", "for l in s: if l in s_map: s_map[l] += 1 else: s_map[l]", "if l in t_map: t_map[l] += 1 else: t_map[l] = 0 for l", "1 else: t_map[l] = 0 for l in s_map: if l not in", "isAnagram(s: str, t: str) -> bool: if len(s) == len(t): s_map = {}", "t: str) -> bool: if len(s) == len(t): s_map = {} t_map =", "if l in s_map: s_map[l] += 1 else: s_map[l] = 0 for l", "else: s_map[l] = 0 for l in t: if l in t_map: t_map[l]", "s_map: if l not in t_map or s_map[l] != t_map[l]: return False else:", "for l in s_map: if l not in t_map or s_map[l] != t_map[l]:", "in s: if l in s_map: s_map[l] += 1 else: s_map[l] = 0", "t_map: t_map[l] += 1 else: t_map[l] = 0 for l in s_map: if", "in t_map: t_map[l] += 1 else: t_map[l] = 0 for l in s_map:", "= {} t_map = {} for l in s: if l in s_map:", "else: t_map[l] = 0 for l in s_map: if l not in t_map", "len(t): s_map = {} t_map = {} for l in s: if l", "l in t: if l in t_map: t_map[l] += 1 else: t_map[l] =", "bool: if len(s) == len(t): s_map = {} t_map = {} for l", "if l not in t_map or s_map[l] != t_map[l]: return False else: return", "== len(t): s_map = {} t_map = {} for l in s: if", "l in s_map: s_map[l] += 1 else: s_map[l] = 0 for l in", "s_map: s_map[l] += 1 else: s_map[l] = 0 for l in t: if", "t_map[l] = 0 for l in s_map: if l not in t_map or", "s_map = {} t_map = {} 
for l in s: if l in", "l in t_map: t_map[l] += 1 else: t_map[l] = 0 for l in", "l in s_map: if l not in t_map or s_map[l] != t_map[l]: return", "if len(s) == len(t): s_map = {} t_map = {} for l in", "for l in t: if l in t_map: t_map[l] += 1 else: t_map[l]", "l not in t_map or s_map[l] != t_map[l]: return False else: return False", "s: if l in s_map: s_map[l] += 1 else: s_map[l] = 0 for", "{} t_map = {} for l in s: if l in s_map: s_map[l]", "t_map[l] += 1 else: t_map[l] = 0 for l in s_map: if l", "not in t_map or s_map[l] != t_map[l]: return False else: return False return", "in t_map or s_map[l] != t_map[l]: return False else: return False return True", "str, t: str) -> bool: if len(s) == len(t): s_map = {} t_map", "len(s) == len(t): s_map = {} t_map = {} for l in s:", "in s_map: s_map[l] += 1 else: s_map[l] = 0 for l in t:", "+= 1 else: t_map[l] = 0 for l in s_map: if l not", "l in s: if l in s_map: s_map[l] += 1 else: s_map[l] =", "1 else: s_map[l] = 0 for l in t: if l in t_map:", "in s_map: if l not in t_map or s_map[l] != t_map[l]: return False", "= {} for l in s: if l in s_map: s_map[l] += 1", "{} for l in s: if l in s_map: s_map[l] += 1 else:", "-> bool: if len(s) == len(t): s_map = {} t_map = {} for", "s_map[l] += 1 else: s_map[l] = 0 for l in t: if l" ]
[ "= QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File = QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75,", "class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font = QtGui.QFont()", "MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit = QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\"))", "self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self, MainWindow):", "16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar = QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23)) self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File", "text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig)", "= QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) 
QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def", "\"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\", \"Image File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\",", "def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\", \"Image File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\",", "changes made in this file will be lost! from PyQt4 import QtCore, QtGui", "\"Image File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\", None)) if __name__", "AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig):", "None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\", None)) if __name__ == \"__main__\": import sys app =", "utf-8 -*- # Form implementation generated from reading ui file 'imgtoxl.ui' # #", "'imgtoxl.ui' # # Created by: PyQt4 UI code generator 4.11.4 # # WARNING!", "self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File = QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File = QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500,", "71, 16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar = QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23)) self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\"))", "AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, 
text, disambig) class Ui_MainWindow(object): def setupUi(self,", "QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label = QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 71, 16))", "MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\", \"Image File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\",", "MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font = QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\"))", "QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590, 96))", "70, 583, 23)) self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File = QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23))", "File\", None)) if __name__ == \"__main__\": import sys app = QtGui.QApplication(sys.argv) MainWindow =", "_encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except", "23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File = QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) 
self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow)", "= QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def", "self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23)) self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File = QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75,", "583, 23)) self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File = QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\"))", "font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit = QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10,", "disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96)", "MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit = QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21))", "self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar = QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23)) self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File =", 
"self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File = QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0)", "self.progressBar = QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23)) self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File = QtGui.QPushButton(self.centralwidget)", "def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font = QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11)", "\"__main__\": import sys app = QtGui.QApplication(sys.argv) MainWindow = QtGui.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow)", "except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def", "# WARNING! All changes made in this file will be lost! 
from PyQt4", "self.lineEdit = QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label = QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10,", "-*- coding: utf-8 -*- # Form implementation generated from reading ui file 'imgtoxl.ui'", "23)) self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File = QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File", "QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text,", "75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\",", "411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label = QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 71, 16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar =", "QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self,", "except AttributeError: def _fromUtf8(s): return s try: _encoding = 
QtGui.QApplication.UnicodeUTF8 def _translate(context, text,", "96) MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font = QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget = QtGui.QWidget(MainWindow)", "font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit = QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411,", "implementation generated from reading ui file 'imgtoxl.ui' # # Created by: PyQt4 UI", "text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig):", "= QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit = QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label =", "by: PyQt4 UI code generator 4.11.4 # # WARNING! All changes made in", "MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font = QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded)", "return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text,", "file will be lost! 
from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8", "self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\", \"Image File\",", "coding: utf-8 -*- # Form implementation generated from reading ui file 'imgtoxl.ui' #", "File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\", None)) if __name__ ==", "generated from reading ui file 'imgtoxl.ui' # # Created by: PyQt4 UI code", "4.11.4 # # WARNING! All changes made in this file will be lost!", "self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label = QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 71, 16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar = QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2,", "import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s", "_fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context,", "self.label.setGeometry(QtCore.QRect(10, 10, 71, 16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar = QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23)) self.progressBar.setProperty(\"value\",", "this file will be lost! 
from PyQt4 import QtCore, QtGui try: _fromUtf8 =", "self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\",", "None)) self.label.setText(_translate(\"MainWindow\", \"Image File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\", None))", "-*- # Form implementation generated from reading ui file 'imgtoxl.ui' # # Created", "== \"__main__\": import sys app = QtGui.QApplication(sys.argv) MainWindow = QtGui.QMainWindow() ui = Ui_MainWindow()", "def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context,", "QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\", \"Image File\", None))", "from reading ui file 'imgtoxl.ui' # # Created by: PyQt4 UI code generator", "try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding =", "self.label = QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 71, 16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar = QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2, 70,", "be lost! 
from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError:", "MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\", \"Image File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save", "return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590,", "made in this file will be lost! from PyQt4 import QtCore, QtGui try:", "self.btn_Open_File = QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File = QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40,", "PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return", "# -*- coding: utf-8 -*- # Form implementation generated from reading ui file", "MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font = QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget =", "# # WARNING! All changes made in this file will be lost! 
from", "Form implementation generated from reading ui file 'imgtoxl.ui' # # Created by: PyQt4", "try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding)", "self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\", None)) if __name__ == \"__main__\": import", "10, 75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File = QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget)", "if __name__ == \"__main__\": import sys app = QtGui.QApplication(sys.argv) MainWindow = QtGui.QMainWindow() ui", "75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File = QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow)", "text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590,", "generator 4.11.4 # # WARNING! 
All changes made in this file will be", "__name__ == \"__main__\": import sys app = QtGui.QApplication(sys.argv) MainWindow = QtGui.QMainWindow() ui =", "QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit = QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90,", "s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig,", "QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding", "self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit = QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label", "All changes made in this file will be lost! from PyQt4 import QtCore,", "_translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\"))", "# Created by: PyQt4 UI code generator 4.11.4 # # WARNING! All changes", "_fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8", "None)) if __name__ == \"__main__\": import sys app = QtGui.QApplication(sys.argv) MainWindow = QtGui.QMainWindow()", "lost! 
from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def", "self.btn_Save_File = QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\"))", "def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow):", "PyQt4 UI code generator 4.11.4 # # WARNING! All changes made in this", "disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class", "setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font = QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font)", "sys app = QtGui.QApplication(sys.argv) MainWindow = QtGui.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show() sys.exit(app.exec_())", "import sys app = QtGui.QApplication(sys.argv) MainWindow = QtGui.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) MainWindow.show()", "# # Created by: PyQt4 UI code generator 4.11.4 # # WARNING! All", "= QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit = QtGui.QLineEdit(self.centralwidget)", "code generator 4.11.4 # # WARNING! 
All changes made in this file will", "= QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label = QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 71,", "96)) font = QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit", "self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File = QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\"))", "QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23)) self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File = QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10,", "ui file 'imgtoxl.ui' # # Created by: PyQt4 UI code generator 4.11.4 #", "Created by: PyQt4 UI code generator 4.11.4 # # WARNING! 
All changes made", "self.label.setText(_translate(\"MainWindow\", \"Image File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\", None)) if", "QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File = QtGui.QPushButton(self.centralwidget) self.btn_Save_File.setGeometry(QtCore.QRect(500, 40, 75, 23))", "text, disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font", "\"Save File\", None)) if __name__ == \"__main__\": import sys app = QtGui.QApplication(sys.argv) MainWindow", "UI code generator 4.11.4 # # WARNING! All changes made in this file", "self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File = QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File =", "from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s):", "MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font = QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget", "reading ui file 'imgtoxl.ui' # # Created by: PyQt4 UI code generator 4.11.4", "file 'imgtoxl.ui' # # Created by: PyQt4 UI code generator 4.11.4 # #", "= QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError:", "= 
QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23)) self.progressBar.setProperty(\"value\", 24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File = QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500,", "<filename>imgtoxl.py # -*- coding: utf-8 -*- # Form implementation generated from reading ui", "QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try:", "23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None))", "MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\", \"Image", "10, 411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label = QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 71, 16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar", "disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return", "self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit = QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label = QtGui.QLabel(self.centralwidget) 
self.label.setGeometry(QtCore.QRect(10,", "File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\", None)) if __name__ == \"__main__\": import sys app", "disambig) class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font =", "return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context,", "self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label = QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 71, 16)) self.label.setObjectName(_fromUtf8(\"label\"))", "None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\", None)) if __name__ == \"__main__\":", "def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return", "QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 71, 16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar = QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23))", "QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context,", "_encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) class Ui_MainWindow(object):", "\"Select File\", None)) self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\", None)) if __name__ == \"__main__\": import sys", "10, 71, 16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar = QtGui.QProgressBar(self.centralwidget) 
self.progressBar.setGeometry(QtCore.QRect(2, 70, 583, 23)) self.progressBar.setProperty(\"value\", 24)", "Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName(_fromUtf8(\"MainWindow\")) MainWindow.resize(590, 96) MainWindow.setMaximumSize(QtCore.QSize(590, 96)) font = QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\"))", "in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8", "40, 75, 23)) self.btn_Save_File.setObjectName(_fromUtf8(\"btn_Save_File\")) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\",", "self.progressBar.setVisible(0) QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\", \"Image File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\",", "# Form implementation generated from reading ui file 'imgtoxl.ui' # # Created by:", "21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label = QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 71, 16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar = QtGui.QProgressBar(self.centralwidget)", "self.btn_Save_File.setText(_translate(\"MainWindow\", \"Save File\", None)) if __name__ == \"__main__\": import sys app = QtGui.QApplication(sys.argv)", "= QtGui.QLabel(self.centralwidget) self.label.setGeometry(QtCore.QRect(10, 10, 71, 16)) self.label.setObjectName(_fromUtf8(\"label\")) self.progressBar = QtGui.QProgressBar(self.centralwidget) self.progressBar.setGeometry(QtCore.QRect(2, 70, 583,", "_translate(context, text, disambig): return 
QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text,", "QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def", "font = QtGui.QFont() font.setFamily(_fromUtf8(\"Calibri\")) font.setPointSize(11) MainWindow.setFont(font) MainWindow.setTabShape(QtGui.QTabWidget.Rounded) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit =", "will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except", "retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\", \"Image File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select File\", None))", "QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8(\"centralwidget\")) self.lineEdit = QtGui.QLineEdit(self.centralwidget) self.lineEdit.setGeometry(QtCore.QRect(90, 10, 411, 21)) self.lineEdit.setObjectName(_fromUtf8(\"lineEdit\")) self.label = QtGui.QLabel(self.centralwidget)", "WARNING! All changes made in this file will be lost! from PyQt4 import", "QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(\"Plastique\")) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ImgToXL\", None)) self.label.setText(_translate(\"MainWindow\", \"Image File\", None)) self.btn_Open_File.setText(_translate(\"MainWindow\", \"Select", "24) self.progressBar.setObjectName(_fromUtf8(\"progressBar\")) self.btn_Open_File = QtGui.QPushButton(self.centralwidget) self.btn_Open_File.setGeometry(QtCore.QRect(500, 10, 75, 23)) self.btn_Open_File.setObjectName(_fromUtf8(\"btn_Open_File\")) self.btn_Save_File = QtGui.QPushButton(self.centralwidget)" ]
[ "import pandas as pd verbose = False class TestDecorators(TestCase): \"\"\" TestClass for SharedDataFrame", "'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'names': ['Bob', 'Jake', 'Lisa', 'Sue'], 'hire_date': [2008,", "['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa',", "'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date': [2004, 2008, 2012,", "= SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) if verbose: print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2", "SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) if verbose: print(test1.get_dataframe()) with self.assertRaises(NotMergableError): test1.merge_into(test2) if", "test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) if verbose: print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the", "TestClass for SharedDataFrame methods \"\"\" def test_is_mergeable_column_names(self): if verbose: print(\"Testing: is_mergeable_columns\") df1 =", "test_merge_on_false(self): if verbose: print(\"Testing: merge_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group':", "[2004, 2008, 2012, 2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose)", "'Engineering', 'HR']}) df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012,", "test2 = SharedDataFrame(df=df2, verbose=verbose) if verbose: print(test1.get_dataframe()) with self.assertRaises(NotMergableError): test1.merge_into(test2) if verbose: print(test1.get_dataframe())", "'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]})", "test1 = 
SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self): if verbose:", "'Bobby'], 'hire_date': [2004, 2008, 2012, 2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 =", "verbose=verbose) test1.merge_into(test2) if verbose: print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def", "verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self): if verbose: print(\"Testing: is_mergeable_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake',", "test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self): if verbose:", "print(\"Testing: is_mergeable_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering',", "df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) target", "if verbose: print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_false(self): if", "2012, 2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) if verbose:", "= SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self): if verbose: print(\"Testing:", "print(\"Testing: merge_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering',", "'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008,", 
"'Engineering', 'HR'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 =", "class TestDecorators(TestCase): \"\"\" TestClass for SharedDataFrame methods \"\"\" def test_is_mergeable_column_names(self): if verbose: print(\"Testing:", "'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2,", "target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'hire_date':", "2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) if", "2 DataFrames\") def test_merge_on_common_values(self): if verbose: print(\"Testing: merge_on_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake',", "2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) if verbose:", "import SharedDataFrame from src.Exceptions import * import pandas as pd verbose = False", "test_merge_on_common_values(self): if verbose: print(\"Testing: merge_on_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group':", "the 2 DataFrames\") def test_merge_on_false(self): if verbose: print(\"Testing: merge_false\") df1 = pd.DataFrame({'employee': ['Bob',", "def test_merge_on_false(self): if verbose: print(\"Testing: merge_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],", "'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],", "SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) if verbose: print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully", "'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) test1 = 
SharedDataFrame(df=df1, verbose=verbose) test2 =", "2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self): if", "verbose = False class TestDecorators(TestCase): \"\"\" TestClass for SharedDataFrame methods \"\"\" def test_is_mergeable_column_names(self):", "= False class TestDecorators(TestCase): \"\"\" TestClass for SharedDataFrame methods \"\"\" def test_is_mergeable_column_names(self): if", "'Engineering', 'Engineering', 'HR'], 'names': ['Bob', 'Jake', 'Lisa', 'Sue'], 'hire_date': [2008, 2012, 2004, 2014]})", "for SharedDataFrame methods \"\"\" def test_is_mergeable_column_names(self): if verbose: print(\"Testing: is_mergeable_columns\") df1 = pd.DataFrame({'employee':", "merged the 2 DataFrames\") def test_merge_on_common_values(self): if verbose: print(\"Testing: merge_on_values\") df1 = pd.DataFrame({'employee':", "is_mergeable_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})", "= SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) if verbose: print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target),", "'Engineering', 'Engineering', 'HR'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2", "verbose: print(\"Testing: is_mergeable_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering',", "verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self): if verbose: print(\"Testing: is_mergeable_values\") df1", "verbose: print(\"Testing: merge_on_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering',", 
"['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date': [2004, 2008, 2012, 2014, 2019]}) test1 =", "print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_false(self): if verbose: print(\"Testing:", "2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self): if", "self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self): if verbose: print(\"Testing: is_mergeable_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa',", "def test_is_mergeable_column_names(self): if verbose: print(\"Testing: is_mergeable_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],", "2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target),", "is_mergeable_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})", "'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date': [2004, 2008,", "* import pandas as pd verbose = False class TestDecorators(TestCase): \"\"\" TestClass for", "test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self): if verbose: print(\"Testing: is_mergeable_false\") df1 =", "def test_merge_on_column_names(self): if verbose: print(\"Testing: merge_on_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],", "DataFrames\") def test_merge_on_common_values(self): if verbose: print(\"Testing: merge_on_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa',", "SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, 
verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self): if verbose: print(\"Testing: merge_on_columns\")", "'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2", "test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self): if verbose: print(\"Testing: merge_on_columns\") df1 =", "['Accounting', 'Engineering', 'Engineering', 'HR'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose)", "pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) test1 = SharedDataFrame(df=df1,", "SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\")", "2012, 2014]}) target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering',", "['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'names': ['Bob', 'Jake', 'Lisa',", "pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'employee':", "= SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self): if verbose: print(\"Testing:", "'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008,", "test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) if verbose: print(test1.get_dataframe()) with self.assertRaises(NotMergableError):", "= pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) 
target =", "merge_on_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})", "df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) test1", "'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'hire_date': [2008, 2012, 2004, 2014]}) test1", "'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob',", "merge_on_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})", "'Sue'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2,", "merge_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})", "2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) if verbose: print(test1.get_dataframe()) with", "'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 =", "2008, 2012, 2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertFalse(test1.is_mergeable(test2))", "'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date':", "'hire_date': [2004, 2008, 2012, 2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2,", "if verbose: print(\"Testing: merge_on_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',", "from src.PyWash import SharedDataFrame from src.Exceptions import * import pandas as pd verbose", "test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, 
verbose=verbose) test1.merge_into(test2) if verbose: print(test1.get_dataframe()) print(target)", "test_is_mergeable_common_values(self): if verbose: print(\"Testing: is_mergeable_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group':", "[2004, 2008, 2012, 2014]}) target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',", "if verbose: print(\"Testing: merge_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',", "'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date':", "pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date': [2004, 2008, 2012, 2014, 2019]}) test1", "print(\"Testing: is_mergeable_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering',", "\"\"\" def test_is_mergeable_column_names(self): if verbose: print(\"Testing: is_mergeable_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa',", "pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) test1 = SharedDataFrame(df=df1,", "False class TestDecorators(TestCase): \"\"\" TestClass for SharedDataFrame methods \"\"\" def test_is_mergeable_column_names(self): if verbose:", "verbose: print(\"Testing: merge_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering',", "['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'hire_date': [2008, 2012, 2004,", "['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'employee': ['Lisa',", "'Sue', 'Bobby'], 'hire_date': [2004, 2008, 2012, 2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2", "src.Exceptions import * import pandas as pd verbose = False class 
TestDecorators(TestCase): \"\"\"", "'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa',", "is_mergeable_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})", "= SharedDataFrame(df=df2, verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self): if verbose: print(\"Testing: merge_on_columns\") df1 = pd.DataFrame({'employee':", "2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully", "pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) target = pd.DataFrame({'employee':", "test1.merge_into(test2) if verbose: print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_false(self):", "test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self): if verbose: print(\"Testing: is_mergeable_values\") df1 =", "print(\"Testing: merge_on_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering',", "test_is_mergeable_false(self): if verbose: print(\"Testing: is_mergeable_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group':", "def test_merge_on_common_values(self): if verbose: print(\"Testing: merge_on_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],", "if verbose: print(\"Testing: is_mergeable_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',", "2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) 
test1.merge_into(test2) if verbose: print(test1.get_dataframe())", "'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date': [2004, 2008, 2012, 2014, 2019]}) test1 = SharedDataFrame(df=df1,", "import * import pandas as pd verbose = False class TestDecorators(TestCase): \"\"\" TestClass", "'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'employee': ['Lisa', 'Bob',", "SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self): if verbose: print(\"Testing: is_mergeable_false\") df1 = pd.DataFrame({'employee': ['Bob',", "['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) target = pd.DataFrame({'employee': ['Bob',", "= SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self): if verbose: print(\"Testing:", "'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose)", "['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004,", "src.PyWash import SharedDataFrame from src.Exceptions import * import pandas as pd verbose =", "= SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_common_values(self): if", "2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self):", "test_merge_on_column_names(self): if verbose: print(\"Testing: merge_on_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group':", "print(\"Testing: merge_on_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': 
['Accounting', 'Engineering', 'Engineering',", "if verbose: print(\"Testing: is_mergeable_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',", "TestCase from src.PyWash import SharedDataFrame from src.Exceptions import * import pandas as pd", "test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the", "= pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) target =", "SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_common_values(self): if verbose:", "self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_common_values(self): if verbose: print(\"Testing: merge_on_values\") df1", "= SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2", "SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) if verbose: print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\")", "verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) if verbose: print(test1.get_dataframe()) with self.assertRaises(NotMergableError): test1.merge_into(test2) if verbose:", "'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue',", "self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self): if verbose: print(\"Testing: is_mergeable_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa',", "pd verbose = False class TestDecorators(TestCase): 
\"\"\" TestClass for SharedDataFrame methods \"\"\" def", "test_is_mergeable_column_names(self): if verbose: print(\"Testing: is_mergeable_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group':", "verbose: print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_false(self): if verbose:", "DataFrames\") def test_merge_on_false(self): if verbose: print(\"Testing: merge_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa',", "print(\"Testing: is_mergeable_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering',", "['Accounting', 'Engineering', 'Engineering', 'HR'], 'names': ['Bob', 'Jake', 'Lisa', 'Sue'], 'hire_date': [2008, 2012, 2004,", "SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self): if verbose: print(\"Testing: is_mergeable_values\") df1 = pd.DataFrame({'employee': ['Bob',", "target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'names':", "from unittest import TestCase from src.PyWash import SharedDataFrame from src.Exceptions import * import", "= pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) test1 =", "2008, 2012, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def", "'HR']}) df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]})", "verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def", "'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 
'Jake', 'Sue', 'Bobby'], 'hire_date': [2004,", "2 DataFrames\") def test_merge_on_false(self): if verbose: print(\"Testing: merge_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake',", "import TestCase from src.PyWash import SharedDataFrame from src.Exceptions import * import pandas as", "[2004, 2008, 2012, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2))", "= pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date': [2004, 2008, 2012, 2014, 2019]})", "'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],", "'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) target = pd.DataFrame({'employee': ['Bob', 'Jake',", "2014]}) target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'],", "if verbose: print(\"Testing: merge_on_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',", "pandas as pd verbose = False class TestDecorators(TestCase): \"\"\" TestClass for SharedDataFrame methods", "'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake',", "test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_common_values(self): if verbose: print(\"Testing: merge_on_values\")", "2008, 2012, 2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) if", "SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self): if verbose: print(\"Testing: is_mergeable_false\")", "SharedDataFrame from src.Exceptions import * import pandas as pd verbose = False class", "'group': 
['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'],", "as pd verbose = False class TestDecorators(TestCase): \"\"\" TestClass for SharedDataFrame methods \"\"\"", "'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'hire_date': [2008, 2012, 2004, 2014]})", "'HR'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2,", "= SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self): if verbose: print(\"Testing: is_mergeable_false\") df1 = pd.DataFrame({'employee':", "df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2", "= pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 =", "SharedDataFrame(df=df2, verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self): if verbose: print(\"Testing: merge_on_columns\") df1 = pd.DataFrame({'employee': ['Bob',", "= pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'names': ['Bob',", "['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose)", "SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self): if verbose: print(\"Testing: is_mergeable_values\")", "= SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) if verbose: print(test1.get_dataframe()) with self.assertRaises(NotMergableError): test1.merge_into(test2)", "pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) 
df2 = pd.DataFrame({'names':", "verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self): if verbose: print(\"Testing: merge_on_columns\") df1", "2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged", "2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self): if", "verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self): if verbose: print(\"Testing: merge_on_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake',", "the 2 DataFrames\") def test_merge_on_common_values(self): if verbose: print(\"Testing: merge_on_values\") df1 = pd.DataFrame({'employee': ['Bob',", "['Bob', 'Jake', 'Lisa', 'Sue'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose)", "TestDecorators(TestCase): \"\"\" TestClass for SharedDataFrame methods \"\"\" def test_is_mergeable_column_names(self): if verbose: print(\"Testing: is_mergeable_columns\")", "self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_false(self): if verbose: print(\"Testing: merge_false\") df1", "verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self): if verbose: print(\"Testing: is_mergeable_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake',", "df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date': [2004, 2008, 2012, 2014,", "methods \"\"\" def test_is_mergeable_column_names(self): if verbose: print(\"Testing: is_mergeable_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake',", "verbose=verbose) test2 = SharedDataFrame(df=df2, 
verbose=verbose) test1.merge_into(test2) if verbose: print(test1.get_dataframe()) print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged", "self.assertFalse(test1.is_mergeable(test2)) def test_merge_on_column_names(self): if verbose: print(\"Testing: merge_on_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa',", "2008, 2012, 2014]}) target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering',", "['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue', 'Bobby'], 'hire_date':", "print(target) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_false(self): if verbose: print(\"Testing: merge_false\")", "'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'names': ['Bob', 'Jake', 'Lisa', 'Sue'], 'hire_date':", "verbose: print(\"Testing: is_mergeable_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering',", "['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004,", "test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_common_values(self):", "'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1,", "merged the 2 DataFrames\") def test_merge_on_false(self): if verbose: print(\"Testing: merge_false\") df1 = pd.DataFrame({'employee':", "verbose: print(\"Testing: merge_on_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering',", "2012, 2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, 
verbose=verbose) self.assertFalse(test1.is_mergeable(test2)) def", "verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self): if verbose: print(\"Testing: is_mergeable_false\") df1", "2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) if verbose: print(test1.get_dataframe())", "SharedDataFrame methods \"\"\" def test_is_mergeable_column_names(self): if verbose: print(\"Testing: is_mergeable_columns\") df1 = pd.DataFrame({'employee': ['Bob',", "= SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self): if verbose: print(\"Testing: is_mergeable_values\") df1 = pd.DataFrame({'employee':", "'hire_date': [2004, 2008, 2012, 2014]}) target = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group':", "'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'names': ['Bob', 'Jake', 'Lisa', 'Sue'], 'hire_date': [2008, 2012,", "pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) target = pd.DataFrame({'employee':", "[2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) test1.merge_into(test2)", "'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012,", "df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) test1", "'hire_date': [2004, 2008, 2012, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose)", "test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self): if verbose:", "2012, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = 
SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_false(self):", "2012, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 = SharedDataFrame(df=df2, verbose=verbose) self.assertTrue(test1.is_mergeable(test2)) def test_is_mergeable_common_values(self):", "df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) target", "unittest import TestCase from src.PyWash import SharedDataFrame from src.Exceptions import * import pandas", "'Engineering', 'HR'], 'names': ['Bob', 'Jake', 'Lisa', 'Sue'], 'hire_date': [2008, 2012, 2004, 2014]}) test1", "verbose: print(\"Testing: is_mergeable_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering',", "verbose=verbose) test1.merge_into(test2) self.assertTrue(test1.get_dataframe().equals(target), \"Successfully merged the 2 DataFrames\") def test_merge_on_common_values(self): if verbose: print(\"Testing:", "def test_is_mergeable_common_values(self): if verbose: print(\"Testing: is_mergeable_values\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],", "'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake',", "def test_is_mergeable_false(self): if verbose: print(\"Testing: is_mergeable_false\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],", "\"\"\" TestClass for SharedDataFrame methods \"\"\" def test_is_mergeable_column_names(self): if verbose: print(\"Testing: is_mergeable_columns\") df1", "'Lisa', 'Sue'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2 =", "'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'names': ['Bob', 'Jake', 'Lisa', 'Sue'],", "\"Successfully merged the 2 DataFrames\") def test_merge_on_false(self): if verbose: print(\"Testing: merge_false\") df1 =", "from 
src.Exceptions import * import pandas as pd verbose = False class TestDecorators(TestCase):", "= pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'], 'hire_date': [2004, 2008, 2012, 2014]}) test1 =", "= pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'hire_date': [2008,", "\"Successfully merged the 2 DataFrames\") def test_merge_on_common_values(self): if verbose: print(\"Testing: merge_on_values\") df1 =", "'Jake', 'Lisa', 'Sue'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1, verbose=verbose) test2", "'names': ['Bob', 'Jake', 'Lisa', 'Sue'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 = SharedDataFrame(df=df1,", "pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'names': ['Bob', 'Jake',", "if verbose: print(\"Testing: is_mergeable_columns\") df1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting',", "'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']}) df2 = pd.DataFrame({'names': ['Lisa', 'Bob', 'Jake', 'Sue'],", "'Jake', 'Sue', 'Bobby'], 'hire_date': [2004, 2008, 2012, 2014, 2019]}) test1 = SharedDataFrame(df=df1, verbose=verbose)", "pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'], 'group': ['Accounting', 'Engineering', 'Engineering', 'HR'], 'hire_date': [2008, 2012,", "'HR'], 'names': ['Bob', 'Jake', 'Lisa', 'Sue'], 'hire_date': [2008, 2012, 2004, 2014]}) test1 =" ]
[ "\"\"\" Get player action for it's string representation. :param action_name: The action's string", "0 STAND = 1 YES = 2 NO = 3 @staticmethod def get_action(action_name:", "Enumeration of possible player actions. \"\"\" HIT = 0 STAND = 1 YES", "The action's string representation :return: The corresponding player action. \"\"\" action_name = action_name.lower()", "str) -> PlayerAction: \"\"\" Get player action for it's string representation. :param action_name:", "__version__ = '1.0' __license__ = 'MIT' class PlayerAction(Enum): \"\"\" Enumeration of possible player", "'<EMAIL>' __version__ = '1.0' __license__ = 'MIT' class PlayerAction(Enum): \"\"\" Enumeration of possible", "\"\"\" HIT = 0 STAND = 1 YES = 2 NO = 3", "== \"stand\": return PlayerAction.STAND elif action_name == \"yes\": return PlayerAction.YES elif action_name ==", "elif action_name == \"stand\": return PlayerAction.STAND elif action_name == \"yes\": return PlayerAction.YES elif", "corresponding player action. \"\"\" action_name = action_name.lower() if action_name == \"hit\": return PlayerAction.HIT", "action_name.lower() if action_name == \"hit\": return PlayerAction.HIT elif action_name == \"stand\": return PlayerAction.STAND", "= 2 NO = 3 @staticmethod def get_action(action_name: str) -> PlayerAction: \"\"\" Get", "action_name == \"yes\": return PlayerAction.YES elif action_name == \"no\": return PlayerAction.NO else: return", "player actions. \"\"\" HIT = 0 STAND = 1 YES = 2 NO", "possible player actions. \"\"\" HIT = 0 STAND = 1 YES = 2", "2 NO = 3 @staticmethod def get_action(action_name: str) -> PlayerAction: \"\"\" Get player", "action for it's string representation. :param action_name: The action's string representation :return: The", "'MIT' class PlayerAction(Enum): \"\"\" Enumeration of possible player actions. \"\"\" HIT = 0", "STAND = 1 YES = 2 NO = 3 @staticmethod def get_action(action_name: str)", "for it's string representation. 
:param action_name: The action's string representation :return: The corresponding", "HIT = 0 STAND = 1 YES = 2 NO = 3 @staticmethod", "action's string representation :return: The corresponding player action. \"\"\" action_name = action_name.lower() if", "PlayerAction(Enum): \"\"\" Enumeration of possible player actions. \"\"\" HIT = 0 STAND =", "actions. \"\"\" HIT = 0 STAND = 1 YES = 2 NO =", "Get player action for it's string representation. :param action_name: The action's string representation", "\"\"\" action_name = action_name.lower() if action_name == \"hit\": return PlayerAction.HIT elif action_name ==", "= action_name.lower() if action_name == \"hit\": return PlayerAction.HIT elif action_name == \"stand\": return", "@staticmethod def get_action(action_name: str) -> PlayerAction: \"\"\" Get player action for it's string", "PlayerAction: \"\"\" Get player action for it's string representation. :param action_name: The action's", "= '<EMAIL>' __version__ = '1.0' __license__ = 'MIT' class PlayerAction(Enum): \"\"\" Enumeration of", "== \"hit\": return PlayerAction.HIT elif action_name == \"stand\": return PlayerAction.STAND elif action_name ==", "1 YES = 2 NO = 3 @staticmethod def get_action(action_name: str) -> PlayerAction:", "= '1.0' __license__ = 'MIT' class PlayerAction(Enum): \"\"\" Enumeration of possible player actions.", ". import PlayerAction __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '1.0' __license__", "PlayerAction.HIT elif action_name == \"stand\": return PlayerAction.STAND elif action_name == \"yes\": return PlayerAction.YES", "get_action(action_name: str) -> PlayerAction: \"\"\" Get player action for it's string representation. :param", "enum import Enum from . import PlayerAction __author__ = '<NAME>' __email__ = '<EMAIL>'", "YES = 2 NO = 3 @staticmethod def get_action(action_name: str) -> PlayerAction: \"\"\"", "The corresponding player action. 
\"\"\" action_name = action_name.lower() if action_name == \"hit\": return", "'<NAME>' __email__ = '<EMAIL>' __version__ = '1.0' __license__ = 'MIT' class PlayerAction(Enum): \"\"\"", "= 3 @staticmethod def get_action(action_name: str) -> PlayerAction: \"\"\" Get player action for", "action_name = action_name.lower() if action_name == \"hit\": return PlayerAction.HIT elif action_name == \"stand\":", "return PlayerAction.STAND elif action_name == \"yes\": return PlayerAction.YES elif action_name == \"no\": return", "if action_name == \"hit\": return PlayerAction.HIT elif action_name == \"stand\": return PlayerAction.STAND elif", "import Enum from . import PlayerAction __author__ = '<NAME>' __email__ = '<EMAIL>' __version__", "__license__ = 'MIT' class PlayerAction(Enum): \"\"\" Enumeration of possible player actions. \"\"\" HIT", "action_name == \"stand\": return PlayerAction.STAND elif action_name == \"yes\": return PlayerAction.YES elif action_name", "representation :return: The corresponding player action. \"\"\" action_name = action_name.lower() if action_name ==", "Enum from . import PlayerAction __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ =", "== \"yes\": return PlayerAction.YES elif action_name == \"no\": return PlayerAction.NO else: return None", "PlayerAction __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '1.0' __license__ = 'MIT'", ":return: The corresponding player action. \"\"\" action_name = action_name.lower() if action_name == \"hit\":", "3 @staticmethod def get_action(action_name: str) -> PlayerAction: \"\"\" Get player action for it's", "= 'MIT' class PlayerAction(Enum): \"\"\" Enumeration of possible player actions. \"\"\" HIT =", "class PlayerAction(Enum): \"\"\" Enumeration of possible player actions. 
\"\"\" HIT = 0 STAND", "__email__ = '<EMAIL>' __version__ = '1.0' __license__ = 'MIT' class PlayerAction(Enum): \"\"\" Enumeration", "return PlayerAction.HIT elif action_name == \"stand\": return PlayerAction.STAND elif action_name == \"yes\": return", "__author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '1.0' __license__ = 'MIT' class", "player action for it's string representation. :param action_name: The action's string representation :return:", "import PlayerAction __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '1.0' __license__ =", "NO = 3 @staticmethod def get_action(action_name: str) -> PlayerAction: \"\"\" Get player action", "\"hit\": return PlayerAction.HIT elif action_name == \"stand\": return PlayerAction.STAND elif action_name == \"yes\":", "action_name: The action's string representation :return: The corresponding player action. \"\"\" action_name =", "\"stand\": return PlayerAction.STAND elif action_name == \"yes\": return PlayerAction.YES elif action_name == \"no\":", "def get_action(action_name: str) -> PlayerAction: \"\"\" Get player action for it's string representation.", "player action. \"\"\" action_name = action_name.lower() if action_name == \"hit\": return PlayerAction.HIT elif", "representation. :param action_name: The action's string representation :return: The corresponding player action. \"\"\"", "from . import PlayerAction __author__ = '<NAME>' __email__ = '<EMAIL>' __version__ = '1.0'", "= 1 YES = 2 NO = 3 @staticmethod def get_action(action_name: str) ->", "string representation. :param action_name: The action's string representation :return: The corresponding player action.", "= 0 STAND = 1 YES = 2 NO = 3 @staticmethod def", "action. \"\"\" action_name = action_name.lower() if action_name == \"hit\": return PlayerAction.HIT elif action_name", "= '<NAME>' __email__ = '<EMAIL>' __version__ = '1.0' __license__ = 'MIT' class PlayerAction(Enum):", "\"\"\" Enumeration of possible player actions. 
\"\"\" HIT = 0 STAND = 1", "from enum import Enum from . import PlayerAction __author__ = '<NAME>' __email__ =", "string representation :return: The corresponding player action. \"\"\" action_name = action_name.lower() if action_name", "PlayerAction.STAND elif action_name == \"yes\": return PlayerAction.YES elif action_name == \"no\": return PlayerAction.NO", "action_name == \"hit\": return PlayerAction.HIT elif action_name == \"stand\": return PlayerAction.STAND elif action_name", "-> PlayerAction: \"\"\" Get player action for it's string representation. :param action_name: The", ":param action_name: The action's string representation :return: The corresponding player action. \"\"\" action_name", "it's string representation. :param action_name: The action's string representation :return: The corresponding player", "elif action_name == \"yes\": return PlayerAction.YES elif action_name == \"no\": return PlayerAction.NO else:", "of possible player actions. \"\"\" HIT = 0 STAND = 1 YES =", "'1.0' __license__ = 'MIT' class PlayerAction(Enum): \"\"\" Enumeration of possible player actions. \"\"\"" ]
[ "'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF = 'config.urls'", "= True # By default, be at least somewhat secure with our session", "using https SESSION_COOKIE_SECURE = False # Absolute filesystem path to the directory that", "paths. ] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES = { 'default': {", "path to the directory that will hold user-uploaded files. MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')", "Build paths inside the project like this: BASE_DIR / 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)", "'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # third party middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS = [", "to the directory that will hold user-uploaded files. 
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') #", "\"../../../\") # Take environment variables from .env file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) # SECURITY WARNING:", "'django.middleware.clickjacking.XFrameOptionsMiddleware', # third party middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel',", "= [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] # cacheable", "'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") env = environ.Env() # reading .env file", "apps #Application base 'Application', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware',", "like this: BASE_DIR / 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") env = environ.Env()", "from MEDIA_ROOT. 
Make sure to use a MEDIA_URL = '/media/' # Absolute path", "INSTALLED_APPS = [ #Django default apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #Third", "True) ) # Build paths inside the project like this: BASE_DIR / 'subdir'.", "if you are using https SESSION_COOKIE_SECURE = False # Absolute filesystem path to", "your TEMPLATE_DIRS here os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'), ], 'APP_DIRS': True, 'OPTIONS':", "STATIC_ROOT = 'static/' # URL prefix for static files. STATIC_URL = '/static/' #", "TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # insert your TEMPLATE_DIRS here", "], 'APP_DIRS': True, 'OPTIONS': { 'debug': DEBUG, 'context_processors': [ # Default context processors", "env = environ.Env() # reading .env file environ.Env.read_env() env = environ.Env( # set", "to False # on all server instances and True only for development. DEBUG", "'debug': DEBUG, 'context_processors': [ # Default context processors 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', #", "project like this: BASE_DIR / 'subdir'. 
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") env =", "# insert your TEMPLATE_DIRS here os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'), ], 'APP_DIRS':", "os.path.abspath(os.path.dirname(__file__) + \"../../../\") # Take environment variables from .env file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) #", "static files STATICFILES_DIRS = [ # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".", "'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF = 'config.urls' TEMPLATES", "] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ #", "'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third party finders 'compressor.finders.CompressorFinder', ) TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' INTERNAL_IPS =", "MEDIA_URL = '/media/' # Absolute path to the directory static files should be", "= [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # insert your TEMPLATE_DIRS here os.path.join(PROJECT_ROOT,", "not relative paths. 
] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES = {", "\"127.0.0.1\"] # Application definition INSTALLED_APPS = [ #Django default apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes',", "} # Argon2 password hashing PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', #", "{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, {", "MEDIA_ROOT. Make sure to use a MEDIA_URL = '/media/' # Absolute path to", "] # cacheable files and compression support STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS = (", "}, ] WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default':", "{ 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS", "['media', 'admin', 'static'] # Build paths inside the project like this: BASE_DIR /", "/ 'subdir'. 
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") # Take environment variables from .env", "locations of static files STATICFILES_DIRS = [ # Put strings here, like \"/home/html/static\"", "# Custom context processors here #'config.context_processors.custom_context_processor', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application'", "[ # Default context processors 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # Custom context processors", "[\"localhost\", \"127.0.0.1\"] # Application definition INSTALLED_APPS = [ #Django default apps 'django.contrib.admin', 'django.contrib.auth',", "env(\"SECRET_KEY\", default=\"unsafe-secret-key\") # SECURITY WARNING: don't run with debug turned on in production!", "= 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # By", "Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', }", "set casting, default value DEBUG=(bool, True) ) # Build paths inside the project", "[ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # insert your TEMPLATE_DIRS here os.path.join(PROJECT_ROOT, 'templates'),", "SECURITY WARNING: don't run with debug turned on in production! 
# Debugging displays", "definition INSTALLED_APPS = [ #Django default apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles',", "# set casting, default value DEBUG=(bool, True) ) # Build paths inside the", "'DIRS': [ # insert your TEMPLATE_DIRS here os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'),", "context processors 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # Custom context processors here #'config.context_processors.custom_context_processor', ],", "} } # Argon2 password hashing PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',", "reading .env file environ.Env.read_env() env = environ.Env( # set casting, default value DEBUG=(bool,", "DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel',", "user-uploaded files. 
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') # URL that handles the media served", "TEMPLATE_DIRS here os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'), ], 'APP_DIRS': True, 'OPTIONS': {", "default finders 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third party finders 'compressor.finders.CompressorFinder', ) TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'", "os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'), ], 'APP_DIRS': True, 'OPTIONS': { 'debug': DEBUG, 'context_processors': [", "absolute paths, not relative paths. ] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES", "'config.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME':", "environment variables from .env file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) # SECURITY WARNING: keep the secret", "processors 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # Custom context processors here #'config.context_processors.custom_context_processor', ], },", "'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") # Take environment variables from .env file", "env('DEBUG') ALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"] # Application definition INSTALLED_APPS = [ #Django default", "on Windows. # Don't forget to use absolute paths, not relative paths. ]", "use forward slashes, even on Windows. # Don't forget to use absolute paths,", "the directory static files should be collected to. 
STATIC_ROOT = 'static/' # URL", "https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, {", "Custom context processors here #'config.context_processors.custom_context_processor', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' #", "file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) # SECURITY WARNING: keep the secret key used in production", "development. DEBUG = env('DEBUG') ALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"] # Application definition INSTALLED_APPS =", "= '/static/' # Additional locations of static files STATICFILES_DIRS = [ # Put", "'compressor', 'django_nose', 'django_extensions', 'debug_toolbar', #Local apps #Application base 'Application', ] MIDDLEWARE = [", "directory that will hold user-uploaded files. MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') # URL that", "even on Windows. # Don't forget to use absolute paths, not relative paths.", "DEBUG, 'context_processors': [ # Default context processors 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # Custom", "= [ # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\". # Always use", "relative paths. 
] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES = { 'default':", "'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF =", "for development. DEBUG = env('DEBUG') ALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"] # Application definition INSTALLED_APPS", "'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third party finders 'compressor.finders.CompressorFinder', ) TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' INTERNAL_IPS = ['127.0.0.1']", "files STATICFILES_DIRS = [ # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\". #", "] DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel',", "Put strings here, like \"/home/html/static\" or \"C:/www/django/static\". 
# Always use forward slashes, even", "'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # third party middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS", "# Put strings here, like \"/home/html/static\" or \"C:/www/django/static\". # Always use forward slashes,", "#'config.context_processors.custom_context_processor', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES", "= [\"localhost\", \"127.0.0.1\"] # Application definition INSTALLED_APPS = [ #Django default apps 'django.contrib.admin',", "[ # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\". # Always use forward", "# Application definition INSTALLED_APPS = [ #Django default apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions',", "= [ #Django default apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #Third party", ".env file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) # SECURITY WARNING: keep the secret key used in", "= { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', } } # Password validation", "use a MEDIA_URL = '/media/' # Absolute path to the directory static files", "displays nice error messages, but leaks memory. Set this to False # on", "}, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N", "or \"C:/www/django/static\". # Always use forward slashes, even on Windows. 
# Don't forget", "'django.template.backends.django.DjangoTemplates', 'DIRS': [ # insert your TEMPLATE_DIRS here os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT,", "= True USE_L10N = True USE_TZ = True # By default, be at", "import Path SUPPORTED_NONLOCALES = ['media', 'admin', 'static'] # Build paths inside the project", "'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #Third party apps 'compressor', 'django_nose', 'django_extensions', 'debug_toolbar', #Local", "Debugging displays nice error messages, but leaks memory. Set this to False #", "'APP_DIRS': True, 'OPTIONS': { 'debug': DEBUG, 'context_processors': [ # Default context processors 'django.template.context_processors.debug',", "https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', } } #", "# URL that handles the media served from MEDIA_ROOT. 
Make sure to use", "# Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, {", "} } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',", "ALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"] # Application definition INSTALLED_APPS = [ #Django default apps", "'127.0.0.1:11211', } } # Argon2 password hashing PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',", "'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates',", "the directory that will hold user-uploaded files. 
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') # URL", "'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel',", "[ # insert your TEMPLATE_DIRS here os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'), ],", "environ.Env( # set casting, default value DEBUG=(bool, True) ) # Build paths inside", "ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # insert", "with debug turned on in production! # Debugging displays nice error messages, but", "'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # insert your TEMPLATE_DIRS", "cacheable files and compression support STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS = ( # django", "django contrib default finders 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third party finders 'compressor.finders.CompressorFinder', ) TEST_RUNNER", "'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # Custom context processors here #'config.context_processors.custom_context_processor', ], }, },", "session cookies. SESSION_COOKIE_HTTPONLY = True # Set this to true if you are", "in production! # Debugging displays nice error messages, but leaks memory. 
Set this", "#Local apps #Application base 'Application', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware',", "directory static files should be collected to. STATIC_ROOT = 'static/' # URL prefix", "'.env')) # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY", "Don't forget to use absolute paths, not relative paths. ] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'", "# https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N =", "secret! SECRET_KEY = env(\"SECRET_KEY\", default=\"unsafe-secret-key\") # SECURITY WARNING: don't run with debug turned", "filesystem path to the directory that will hold user-uploaded files. MEDIA_ROOT = os.path.join(PROJECT_ROOT,", "password hashing PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher',", "'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # insert your TEMPLATE_DIRS here os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'),", "'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # Custom context processors here #'config.context_processors.custom_context_processor', ], }, }, ] WSGI_APPLICATION", "# https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', } }", "STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS = ( # django contrib default finders 
'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder',", "= 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS = ( # django contrib default finders 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', #", "PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") # Take environment variables from .env file environ.Env.read_env(os.path.join(PROJECT_ROOT,", "insert your TEMPLATE_DIRS here os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'), ], 'APP_DIRS': True,", "like this: BASE_DIR / 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") # Take environment", "'django.contrib.staticfiles', #Third party apps 'compressor', 'django_nose', 'django_extensions', 'debug_toolbar', #Local apps #Application base 'Application',", "SESSION_COOKIE_SECURE = False # Absolute filesystem path to the directory that will hold", "\"../../../\") env = environ.Env() # reading .env file environ.Env.read_env() env = environ.Env( #", "files. MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') # URL that handles the media served from", "'OPTIONS': { 'debug': DEBUG, 'context_processors': [ # Default context processors 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth',", "USE_L10N = True USE_TZ = True # By default, be at least somewhat", "# Don't forget to use absolute paths, not relative paths. 
] DEFAULT_AUTO_FIELD =", "hashing PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ]", "False # on all server instances and True only for development. DEBUG =", "static files. STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS =", "{ 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } } # Argon2 password hashing PASSWORD_HASHERS =", "[ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },", "to true if you are using https SESSION_COOKIE_SECURE = False # Absolute filesystem", "'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # Custom context processors here #'config.context_processors.custom_context_processor', ], }, }, ]", "+ \"../../../\") env = environ.Env() # reading .env file environ.Env.read_env() env = environ.Env(", "nice error messages, but leaks memory. Set this to False # on all", "}, }, ] WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = {", "secret key used in production secret! SECRET_KEY = env(\"SECRET_KEY\", default=\"unsafe-secret-key\") # SECURITY WARNING:", "'django.db.models.BigAutoField' # Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211',", "like \"/home/html/static\" or \"C:/www/django/static\". 
# Always use forward slashes, even on Windows. #", "'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [", "'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # third party middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ]", "[ #Django default apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #Third party apps", "Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } }", "'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] #", "be at least somewhat secure with our session cookies. SESSION_COOKIE_HTTPONLY = True #", "'static/' # URL prefix for static files. STATIC_URL = '/static/' # Additional locations", "'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } } # Argon2 password hashing PASSWORD_HASHERS = [", "messages, but leaks memory. 
Set this to False # on all server instances", "'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # third party middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS =", "# Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3',", "Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME':", "= 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # insert your", "'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'), ], 'APP_DIRS': True, 'OPTIONS': { 'debug': DEBUG, 'context_processors':", "compression support STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS = ( # django contrib default finders", "# Default context processors 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # Custom context processors here", "{ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE", "}, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us'", "that handles the media served from MEDIA_ROOT. 
Make sure to use a MEDIA_URL", "server instances and True only for development. DEBUG = env('DEBUG') ALLOWED_HOSTS = [\"localhost\",", "'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # third party middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware',", "but leaks memory. Set this to False # on all server instances and", "import environ from pathlib import Path SUPPORTED_NONLOCALES = ['media', 'admin', 'static'] # Build", "from .env file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) # SECURITY WARNING: keep the secret key used", "WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE':", "processors here #'config.context_processors.custom_context_processor', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' # Database #", "'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # third party middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.history.HistoryPanel',", "value DEBUG=(bool, True) ) # Build paths inside the project like this: BASE_DIR", "{ 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', } } # Password validation #", "# By default, be at least somewhat secure with our session cookies. SESSION_COOKIE_HTTPONLY", "inside the project like this: BASE_DIR / 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\")", "\"C:/www/django/static\". 
# Always use forward slashes, even on Windows. # Don't forget to", "support STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS = ( # django contrib default finders 'django.contrib.staticfiles.finders.FileSystemFinder',", "to use absolute paths, not relative paths. ] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Memorycached", "= [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # third party middleware", "Path SUPPORTED_NONLOCALES = ['media', 'admin', 'static'] # Build paths inside the project like", "'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF = 'config.urls' TEMPLATES =", "}, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization #", "# SECURITY WARNING: don't run with debug turned on in production! # Debugging", "= 'static/' # URL prefix for static files. STATIC_URL = '/static/' # Additional", "SECRET_KEY = env(\"SECRET_KEY\", default=\"unsafe-secret-key\") # SECURITY WARNING: don't run with debug turned on", "secure with our session cookies. SESSION_COOKIE_HTTPONLY = True # Set this to true", "this: BASE_DIR / 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") # Take environment variables", "in production secret! 
SECRET_KEY = env(\"SECRET_KEY\", default=\"unsafe-secret-key\") # SECURITY WARNING: don't run with", "# reading .env file environ.Env.read_env() env = environ.Env( # set casting, default value", "true if you are using https SESSION_COOKIE_SECURE = False # Absolute filesystem path", "= '/media/' # Absolute path to the directory static files should be collected", "BASE_DIR / 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") # Take environment variables from", "'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND':", "to. STATIC_ROOT = 'static/' # URL prefix for static files. STATIC_URL = '/static/'", "# Argon2 password hashing PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom", "= [ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel',", "'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } } # Argon2 password hashing PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher',", "Application definition INSTALLED_APPS = [ #Django default apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages',", 
"], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES =", "Argon2 password hashing PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom hasher", "cookies. SESSION_COOKIE_HTTPONLY = True # Set this to true if you are using", "Set this to true if you are using https SESSION_COOKIE_SECURE = False #", "environ.Env.read_env() env = environ.Env( # set casting, default value DEBUG=(bool, True) ) #", "https SESSION_COOKIE_SECURE = False # Absolute filesystem path to the directory that will", "environ.Env() # reading .env file environ.Env.read_env() env = environ.Env( # set casting, default", "'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [", "PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] #", "# on all server instances and True only for development. DEBUG = env('DEBUG')", "be collected to. STATIC_ROOT = 'static/' # URL prefix for static files. 
STATIC_URL", "environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) # SECURITY WARNING: keep the secret key used in production secret!", "#Application base 'Application', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware',", "forget to use absolute paths, not relative paths. ] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' #", "= 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3',", "path to the directory static files should be collected to. STATIC_ROOT = 'static/'", "prefix for static files. STATIC_URL = '/static/' # Additional locations of static files", "the project like this: BASE_DIR / 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") env", "forward slashes, even on Windows. # Don't forget to use absolute paths, not", "'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #Third party apps 'compressor', 'django_nose', 'django_extensions', 'debug_toolbar',", "use absolute paths, not relative paths. 
] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache'", "'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME':", "Make sure to use a MEDIA_URL = '/media/' # Absolute path to the", "hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] # cacheable files and compression support STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS", "from pathlib import Path SUPPORTED_NONLOCALES = ['media', 'admin', 'static'] # Build paths inside", "] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N =", "default apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #Third party apps 'compressor', 'django_nose',", "least somewhat secure with our session cookies. SESSION_COOKIE_HTTPONLY = True # Set this", "Default context processors 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # Custom context processors here #'config.context_processors.custom_context_processor',", "paths, not relative paths. ] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES =", "on all server instances and True only for development. DEBUG = env('DEBUG') ALLOWED_HOSTS", "Windows. # Don't forget to use absolute paths, not relative paths. ] DEFAULT_AUTO_FIELD", "WARNING: keep the secret key used in production secret! 
SECRET_KEY = env(\"SECRET_KEY\", default=\"unsafe-secret-key\")", "'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators", "TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True #", "env = environ.Env( # set casting, default value DEBUG=(bool, True) ) # Build", "= env('DEBUG') ALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"] # Application definition INSTALLED_APPS = [ #Django", "}, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', },", "debug turned on in production! # Debugging displays nice error messages, but leaks", "handles the media served from MEDIA_ROOT. Make sure to use a MEDIA_URL =", "served from MEDIA_ROOT. Make sure to use a MEDIA_URL = '/media/' # Absolute", "Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] # cacheable files and compression support STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'", "pathlib import Path SUPPORTED_NONLOCALES = ['media', 'admin', 'static'] # Build paths inside the", "# Set this to true if you are using https SESSION_COOKIE_SECURE = False", "'Application', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', #", "DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',", 
"'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #Third party apps 'compressor', 'django_nose', 'django_extensions', 'debug_toolbar', #Local apps", "# URL prefix for static files. STATIC_URL = '/static/' # Additional locations of", "= environ.Env( # set casting, default value DEBUG=(bool, True) ) # Build paths", "media served from MEDIA_ROOT. Make sure to use a MEDIA_URL = '/media/' #", "'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC'", "strings here, like \"/home/html/static\" or \"C:/www/django/static\". # Always use forward slashes, even on", ".env file environ.Env.read_env() env = environ.Env( # set casting, default value DEBUG=(bool, True)", "middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel',", "'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] # cacheable files and compression", "'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 
'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel',", "SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } } #", "MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') # URL that handles the media served from MEDIA_ROOT.", "= ['media', 'admin', 'static'] # Build paths inside the project like this: BASE_DIR", "will hold user-uploaded files. MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') # URL that handles the", "should be collected to. STATIC_ROOT = 'static/' # URL prefix for static files.", "'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE =", "'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ {", "True, 'OPTIONS': { 'debug': DEBUG, 'context_processors': [ # Default context processors 'django.template.context_processors.debug', 'django.template.context_processors.request',", "'/static/' # Additional locations of static files STATICFILES_DIRS = [ # Put strings", "By default, be at least somewhat secure with our session cookies. SESSION_COOKIE_HTTPONLY =", "'admin', 'static'] # Build paths inside the project like this: BASE_DIR / 'subdir'.", "# Debugging displays nice error messages, but leaks memory. Set this to False", "files should be collected to. 
STATIC_ROOT = 'static/' # URL prefix for static", "'debug_toolbar', #Local apps #Application base 'Application', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware',", "validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',", "{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ]", "are using https SESSION_COOKIE_SECURE = False # Absolute filesystem path to the directory", "#Third party apps 'compressor', 'django_nose', 'django_extensions', 'debug_toolbar', #Local apps #Application base 'Application', ]", "Additional locations of static files STATICFILES_DIRS = [ # Put strings here, like", "\"/home/html/static\" or \"C:/www/django/static\". # Always use forward slashes, even on Windows. # Don't", "{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # insert your TEMPLATE_DIRS here os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT,", "production secret! 
SECRET_KEY = env(\"SECRET_KEY\", default=\"unsafe-secret-key\") # SECURITY WARNING: don't run with debug", "apps 'compressor', 'django_nose', 'django_extensions', 'debug_toolbar', #Local apps #Application base 'Application', ] MIDDLEWARE =", "import os import logging import environ from pathlib import Path SUPPORTED_NONLOCALES = ['media',", "'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel',", "= { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } } # Argon2 password", "third party middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel',", "'django.contrib.messages', 'django.contrib.staticfiles', #Third party apps 'compressor', 'django_nose', 'django_extensions', 'debug_toolbar', #Local apps #Application base", "'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE", "turned on in production! # Debugging displays nice error messages, but leaks memory.", "the project like this: BASE_DIR / 'subdir'. 
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") #", "] DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' # Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND':", "# Always use forward slashes, even on Windows. # Don't forget to use", "default=\"unsafe-secret-key\") # SECURITY WARNING: don't run with debug turned on in production! #", "URL prefix for static files. STATIC_URL = '/static/' # Additional locations of static", "/ 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") env = environ.Env() # reading .env", "# Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] # cacheable files and compression support STATICFILES_STORAGE =", "= env(\"SECRET_KEY\", default=\"unsafe-secret-key\") # SECURITY WARNING: don't run with debug turned on in", "Absolute path to the directory static files should be collected to. STATIC_ROOT =", "= True USE_TZ = True # By default, be at least somewhat secure", "Always use forward slashes, even on Windows. # Don't forget to use absolute", "instances and True only for development. DEBUG = env('DEBUG') ALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"]", "party apps 'compressor', 'django_nose', 'django_extensions', 'debug_toolbar', #Local apps #Application base 'Application', ] MIDDLEWARE", "SECURITY WARNING: keep the secret key used in production secret! 
SECRET_KEY = env(\"SECRET_KEY\",", "you are using https SESSION_COOKIE_SECURE = False # Absolute filesystem path to the", "True # By default, be at least somewhat secure with our session cookies.", "'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS = ( # django contrib default finders 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third", "# Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True", "that will hold user-uploaded files. MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') # URL that handles", "environ from pathlib import Path SUPPORTED_NONLOCALES = ['media', 'admin', 'static'] # Build paths", "files. STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = [", "URL that handles the media served from MEDIA_ROOT. Make sure to use a", "True USE_TZ = True # By default, be at least somewhat secure with", "DEBUG=(bool, True) ) # Build paths inside the project like this: BASE_DIR /", "= False # Absolute filesystem path to the directory that will hold user-uploaded", "'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization", "run with debug turned on in production! 
# Debugging displays nice error messages,", "# Take environment variables from .env file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) # SECURITY WARNING: keep", "context processors here #'config.context_processors.custom_context_processor', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' # Database", "= os.path.abspath(os.path.dirname(__file__) + \"../../../\") env = environ.Env() # reading .env file environ.Env.read_env() env", "STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = [ #", "# third party middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel',", "don't run with debug turned on in production! # Debugging displays nice error", "'django_extensions', 'debug_toolbar', #Local apps #Application base 'Application', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware',", "all server instances and True only for development. DEBUG = env('DEBUG') ALLOWED_HOSTS =", "finders 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third party finders 'compressor.finders.CompressorFinder', ) TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' INTERNAL_IPS", "only for development. DEBUG = env('DEBUG') ALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"] # Application definition", "the media served from MEDIA_ROOT. Make sure to use a MEDIA_URL = '/media/'", "USE_TZ = True # By default, be at least somewhat secure with our", "casting, default value DEBUG=(bool, True) ) # Build paths inside the project like", "SESSION_COOKIE_HTTPONLY = True # Set this to true if you are using https", "project like this: BASE_DIR / 'subdir'. 
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") # Take", "files and compression support STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS = ( # django contrib", "and True only for development. DEBUG = env('DEBUG') ALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"] #", "= 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ =", "# cacheable files and compression support STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS = ( #", "USE_I18N = True USE_L10N = True USE_TZ = True # By default, be", "our session cookies. SESSION_COOKIE_HTTPONLY = True # Set this to true if you", "STATICFILES_FINDERS = ( # django contrib default finders 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third party", "for static files. STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS", "= environ.Env() # reading .env file environ.Env.read_env() env = environ.Env( # set casting,", "here os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'), ], 'APP_DIRS': True, 'OPTIONS': { 'debug':", "= 'django.db.models.BigAutoField' # Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION':", "= [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',", "'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] # cacheable files and compression support", "'django_nose', 
'django_extensions', 'debug_toolbar', #Local apps #Application base 'Application', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware',", "'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS':", "[ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel',", "# Build paths inside the project like this: BASE_DIR / 'subdir'. PROJECT_ROOT =", "LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ", "'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True", "'templates/layout'), ], 'APP_DIRS': True, 'OPTIONS': { 'debug': DEBUG, 'context_processors': [ # Default context", "MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # third party", "'NAME': 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ {", "WARNING: don't run with debug turned on in production! # Debugging displays nice", "used in production secret! 
SECRET_KEY = env(\"SECRET_KEY\", default=\"unsafe-secret-key\") # SECURITY WARNING: don't run", "key used in production secret! SECRET_KEY = env(\"SECRET_KEY\", default=\"unsafe-secret-key\") # SECURITY WARNING: don't", "'/media/' # Absolute path to the directory static files should be collected to.", "on in production! # Debugging displays nice error messages, but leaks memory. Set", "here, like \"/home/html/static\" or \"C:/www/django/static\". # Always use forward slashes, even on Windows.", "= os.path.abspath(os.path.dirname(__file__) + \"../../../\") # Take environment variables from .env file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env'))", "[ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] # cacheable files", "to the directory static files should be collected to. STATIC_ROOT = 'static/' #", "+ \"../../../\") # Take environment variables from .env file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) # SECURITY", "# Absolute filesystem path to the directory that will hold user-uploaded files. MEDIA_ROOT", "False # Absolute filesystem path to the directory that will hold user-uploaded files.", "keep the secret key used in production secret! 
SECRET_KEY = env(\"SECRET_KEY\", default=\"unsafe-secret-key\") #", "here #'config.context_processors.custom_context_processor', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases", "'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } } # Argon2 password hashing PASSWORD_HASHERS", "= ( # django contrib default finders 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third party finders", "os.path.join(PROJECT_ROOT, 'media') # URL that handles the media served from MEDIA_ROOT. Make sure", "party middleware 'whitenoise.middleware.WhiteNoiseMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel',", "this to true if you are using https SESSION_COOKIE_SECURE = False # Absolute", "= True # Set this to true if you are using https SESSION_COOKIE_SECURE", "'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF", "error messages, but leaks memory. Set this to False # on all server", "memory. Set this to False # on all server instances and True only", "with our session cookies. 
SESSION_COOKIE_HTTPONLY = True # Set this to true if", "# Memorycached SESSIONS_ENGINE='django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', }", "and compression support STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS = ( # django contrib default", "'debug_toolbar.middleware.DebugToolbarMiddleware', ] DEBUG_TOOLBAR_PANELS = [ 'debug_toolbar.panels.history.HistoryPanel', 'debug_toolbar.panels.versions.VersionsPanel', 'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel',", "DEBUG = env('DEBUG') ALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"] # Application definition INSTALLED_APPS = [", "'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #Third party apps 'compressor', 'django_nose', 'django_extensions', 'debug_toolbar', #Local apps #Application", "# django contrib default finders 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third party finders 'compressor.finders.CompressorFinder', )", "slashes, even on Windows. 
# Don't forget to use absolute paths, not relative", "True USE_L10N = True USE_TZ = True # By default, be at least", "{ 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } } # Argon2 password hashing", "[ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # third party middleware 'whitenoise.middleware.WhiteNoiseMiddleware',", "base 'Application', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware',", "'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ] ROOT_URLCONF = 'config.urls' TEMPLATES = [", "] WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': {", "a MEDIA_URL = '/media/' # Absolute path to the directory static files should", "'context_processors': [ # Default context processors 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', # Custom context", "static files should be collected to. 
STATIC_ROOT = 'static/' # URL prefix for", "'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'), ], 'APP_DIRS': True, 'OPTIONS': { 'debug': DEBUG, 'context_processors': [ #", "this to False # on all server instances and True only for development.", "# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },", "( # django contrib default finders 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third party finders 'compressor.finders.CompressorFinder',", "os.path.abspath(os.path.dirname(__file__) + \"../../../\") env = environ.Env() # reading .env file environ.Env.read_env() env =", "paths inside the project like this: BASE_DIR / 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) +", "import logging import environ from pathlib import Path SUPPORTED_NONLOCALES = ['media', 'admin', 'static']", "Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N", "{ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/", "os import logging import environ from pathlib import Path SUPPORTED_NONLOCALES = ['media', 'admin',", "'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',", "'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', } } # 
Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS =", "= os.path.join(PROJECT_ROOT, 'media') # URL that handles the media served from MEDIA_ROOT. Make", "of static files STATICFILES_DIRS = [ # Put strings here, like \"/home/html/static\" or", "'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] # cacheable files and compression support STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' STATICFILES_FINDERS =", ") # Build paths inside the project like this: BASE_DIR / 'subdir'. PROJECT_ROOT", "logging import environ from pathlib import Path SUPPORTED_NONLOCALES = ['media', 'admin', 'static'] #", "this: BASE_DIR / 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") env = environ.Env() #", "True # Set this to true if you are using https SESSION_COOKIE_SECURE =", "to use a MEDIA_URL = '/media/' # Absolute path to the directory static", "sure to use a MEDIA_URL = '/media/' # Absolute path to the directory", "'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] # cacheable files and", "PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") env = environ.Env() # reading .env file environ.Env.read_env()", "#Django default apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #Third party apps 'compressor',", "Take environment variables from .env file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) # SECURITY WARNING: keep the", "'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # 
https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE =", "production! # Debugging displays nice error messages, but leaks memory. Set this to", "DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3', } } # Password", "hold user-uploaded files. MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') # URL that handles the media", "Absolute filesystem path to the directory that will hold user-uploaded files. MEDIA_ROOT =", "True only for development. DEBUG = env('DEBUG') ALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"] # Application", "] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # third", "collected to. STATIC_ROOT = 'static/' # URL prefix for static files. STATIC_URL =", "at least somewhat secure with our session cookies. SESSION_COOKIE_HTTPONLY = True # Set", "CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '127.0.0.1:11211', } } # Argon2", "https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True", "file environ.Env.read_env() env = environ.Env( # set casting, default value DEBUG=(bool, True) )", "contrib default finders 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # third party finders 'compressor.finders.CompressorFinder', ) TEST_RUNNER =", "'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # By default,", "default value DEBUG=(bool, True) ) # Build paths inside the project like this:", "leaks memory. 
Set this to False # on all server instances and True", "'django.contrib.messages.context_processors.messages', # Custom context processors here #'config.context_processors.custom_context_processor', ], }, }, ] WSGI_APPLICATION =", "the secret key used in production secret! SECRET_KEY = env(\"SECRET_KEY\", default=\"unsafe-secret-key\") # SECURITY", "'static'] # Build paths inside the project like this: BASE_DIR / 'subdir'. PROJECT_ROOT", "# Absolute path to the directory static files should be collected to. STATIC_ROOT", "BASE_DIR / 'subdir'. PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + \"../../../\") env = environ.Env() # reading", "'media') # URL that handles the media served from MEDIA_ROOT. Make sure to", "} # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },", "default, be at least somewhat secure with our session cookies. SESSION_COOKIE_HTTPONLY = True", "# SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY =", "AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME':", "os.path.join(PROJECT_ROOT, 'templates'), os.path.join(PROJECT_ROOT, 'templates/.base'), os.path.join(PROJECT_ROOT, 'templates/layout'), ], 'APP_DIRS': True, 'OPTIONS': { 'debug': DEBUG,", "'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME':", "os.path.join(PROJECT_ROOT, 'templates/layout'), ], 'APP_DIRS': True, 'OPTIONS': { 'debug': DEBUG, 'context_processors': [ # Default", "somewhat secure with our session cookies. 
SESSION_COOKIE_HTTPONLY = True # Set this to", "STATICFILES_DIRS = [ # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\". # Always", "apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', #Third party apps 'compressor', 'django_nose', 'django_extensions',", "SUPPORTED_NONLOCALES = ['media', 'admin', 'static'] # Build paths inside the project like this:", "# Additional locations of static files STATICFILES_DIRS = [ # Put strings here,", "'debug_toolbar.panels.timer.TimerPanel', 'debug_toolbar.panels.settings.SettingsPanel', 'debug_toolbar.panels.headers.HeadersPanel', 'debug_toolbar.panels.request.RequestPanel', 'debug_toolbar.panels.sql.SQLPanel', 'debug_toolbar.panels.staticfiles.StaticFilesPanel', 'debug_toolbar.panels.templates.TemplatesPanel', 'debug_toolbar.panels.cache.CachePanel', 'debug_toolbar.panels.signals.SignalsPanel', 'debug_toolbar.panels.logging.LoggingPanel', 'debug_toolbar.panels.redirects.RedirectsPanel', 'debug_toolbar.panels.profiling.ProfilingPanel', ]", "'LOCATION': '127.0.0.1:11211', } } # Argon2 password hashing PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher',", "'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', # Custom hasher 'Application.hashers.PBKDF2WrappedSHA1PasswordHasher', ] # cacheable files and compression support STATICFILES_STORAGE", "variables from .env file environ.Env.read_env(os.path.join(PROJECT_ROOT, '.env')) # SECURITY WARNING: keep the secret key", "{ 'debug': DEBUG, 'context_processors': [ # Default context processors 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages',", "Set this to False # on all server instances and True only for" ]
[ "k3*(r**6)) # 직교좌표 및 좌상단 기준으로 복원 ---⑤ mapx, mapy = cv2.polarToCart(ru, theta)", "변환 ---③ mapx = 2*mapx/(cols-1)-1 mapy = 2*mapy/(rows-1)-1 r, theta = cv2.cartToPolar(mapx, mapy)", "rows, cols = img.shape[:2] # 매핑 배열 생성 ---② mapy, mapx = np.indices((rows,", "왜곡 계수 설정 ---① k1, k2, k3 = 0.5, 0.2, 0.0 # 배럴", "k2, k3 = -0.3, 0, 0 # 핀큐션 왜곡 img = cv2.imread('../img/girl.jpg') rows,", "k3 = 0.5, 0.2, 0.0 # 배럴 왜곡 #k1, k2, k3 = -0.3,", "---⑤ mapx, mapy = cv2.polarToCart(ru, theta) mapx = ((mapx + 1)*cols-1)/2 mapy =", "직교좌표 및 좌상단 기준으로 복원 ---⑤ mapx, mapy = cv2.polarToCart(ru, theta) mapx =", "k2*(r**4) + k3*(r**6)) # 직교좌표 및 좌상단 기준으로 복원 ---⑤ mapx, mapy =", "mapy = 2*mapy/(rows-1)-1 r, theta = cv2.cartToPolar(mapx, mapy) # 방사 왜곡 변영 연산", "# 중앙점 좌표로 -1~1 정규화 및 극좌표 변환 ---③ mapx = 2*mapx/(cols-1)-1 mapy", "img.shape[:2] # 매핑 배열 생성 ---② mapy, mapx = np.indices((rows, cols),dtype=np.float32) # 중앙점", "= img.shape[:2] # 매핑 배열 생성 ---② mapy, mapx = np.indices((rows, cols),dtype=np.float32) #", "변영 연산 ---④ ru = r*(1+k1*(r**2) + k2*(r**4) + k3*(r**6)) # 직교좌표 및", "방사 왜곡 변영 연산 ---④ ru = r*(1+k1*(r**2) + k2*(r**4) + k3*(r**6)) #", "1)*rows-1)/2 # 리매핑 ---⑥ distored = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR) cv2.imshow('original', img) cv2.imshow('distorted', distored) cv2.waitKey() cv2.destroyAllWindows()", "#k1, k2, k3 = -0.3, 0, 0 # 핀큐션 왜곡 img = cv2.imread('../img/girl.jpg')", "2*mapx/(cols-1)-1 mapy = 2*mapy/(rows-1)-1 r, theta = cv2.cartToPolar(mapx, mapy) # 방사 왜곡 변영", "왜곡 img = cv2.imread('../img/girl.jpg') rows, cols = img.shape[:2] # 매핑 배열 생성 ---②", "= r*(1+k1*(r**2) + k2*(r**4) + k3*(r**6)) # 직교좌표 및 좌상단 기준으로 복원 ---⑤", "2*mapy/(rows-1)-1 r, theta = cv2.cartToPolar(mapx, mapy) # 방사 왜곡 변영 연산 ---④ ru", "+ k3*(r**6)) # 직교좌표 및 좌상단 기준으로 복원 ---⑤ mapx, mapy = cv2.polarToCart(ru,", "극좌표 변환 ---③ mapx = 2*mapx/(cols-1)-1 mapy = 2*mapy/(rows-1)-1 r, theta = cv2.cartToPolar(mapx,", "매핑 배열 생성 ---② mapy, mapx = np.indices((rows, cols),dtype=np.float32) # 중앙점 좌표로 -1~1", "mapy = ((mapy + 1)*rows-1)/2 
# 리매핑 ---⑥ distored = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR) cv2.imshow('original', img)", "핀큐션 왜곡 img = cv2.imread('../img/girl.jpg') rows, cols = img.shape[:2] # 매핑 배열 생성", "중앙점 좌표로 -1~1 정규화 및 극좌표 변환 ---③ mapx = 2*mapx/(cols-1)-1 mapy =", "as np # 왜곡 계수 설정 ---① k1, k2, k3 = 0.5, 0.2,", "배열 생성 ---② mapy, mapx = np.indices((rows, cols),dtype=np.float32) # 중앙점 좌표로 -1~1 정규화", "cv2.cartToPolar(mapx, mapy) # 방사 왜곡 변영 연산 ---④ ru = r*(1+k1*(r**2) + k2*(r**4)", "---② mapy, mapx = np.indices((rows, cols),dtype=np.float32) # 중앙점 좌표로 -1~1 정규화 및 극좌표", "# 방사 왜곡 변영 연산 ---④ ru = r*(1+k1*(r**2) + k2*(r**4) + k3*(r**6))", "+ 1)*cols-1)/2 mapy = ((mapy + 1)*rows-1)/2 # 리매핑 ---⑥ distored = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR)", "---① k1, k2, k3 = 0.5, 0.2, 0.0 # 배럴 왜곡 #k1, k2,", "= 2*mapx/(cols-1)-1 mapy = 2*mapy/(rows-1)-1 r, theta = cv2.cartToPolar(mapx, mapy) # 방사 왜곡", "r, theta = cv2.cartToPolar(mapx, mapy) # 방사 왜곡 변영 연산 ---④ ru =", "= 0.5, 0.2, 0.0 # 배럴 왜곡 #k1, k2, k3 = -0.3, 0,", "1)*cols-1)/2 mapy = ((mapy + 1)*rows-1)/2 # 리매핑 ---⑥ distored = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR) cv2.imshow('original',", "img = cv2.imread('../img/girl.jpg') rows, cols = img.shape[:2] # 매핑 배열 생성 ---② mapy,", "= np.indices((rows, cols),dtype=np.float32) # 중앙점 좌표로 -1~1 정규화 및 극좌표 변환 ---③ mapx", "0.0 # 배럴 왜곡 #k1, k2, k3 = -0.3, 0, 0 # 핀큐션", "cols),dtype=np.float32) # 중앙점 좌표로 -1~1 정규화 및 극좌표 변환 ---③ mapx = 2*mapx/(cols-1)-1", "계수 설정 ---① k1, k2, k3 = 0.5, 0.2, 0.0 # 배럴 왜곡", "k3 = -0.3, 0, 0 # 핀큐션 왜곡 img = cv2.imread('../img/girl.jpg') rows, cols", "0, 0 # 핀큐션 왜곡 img = cv2.imread('../img/girl.jpg') rows, cols = img.shape[:2] #", "# 배럴 왜곡 #k1, k2, k3 = -0.3, 0, 0 # 핀큐션 왜곡", "---④ ru = r*(1+k1*(r**2) + k2*(r**4) + k3*(r**6)) # 직교좌표 및 좌상단 기준으로", "((mapy + 1)*rows-1)/2 # 리매핑 ---⑥ distored = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR) cv2.imshow('original', img) cv2.imshow('distorted', distored)", "cv2.imread('../img/girl.jpg') rows, cols = img.shape[:2] # 매핑 배열 생성 ---② mapy, mapx =", "mapx, 
mapy = cv2.polarToCart(ru, theta) mapx = ((mapx + 1)*cols-1)/2 mapy = ((mapy", "np.indices((rows, cols),dtype=np.float32) # 중앙점 좌표로 -1~1 정규화 및 극좌표 변환 ---③ mapx =", "# 직교좌표 및 좌상단 기준으로 복원 ---⑤ mapx, mapy = cv2.polarToCart(ru, theta) mapx", "= cv2.imread('../img/girl.jpg') rows, cols = img.shape[:2] # 매핑 배열 생성 ---② mapy, mapx", "cols = img.shape[:2] # 매핑 배열 생성 ---② mapy, mapx = np.indices((rows, cols),dtype=np.float32)", "+ 1)*rows-1)/2 # 리매핑 ---⑥ distored = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR) cv2.imshow('original', img) cv2.imshow('distorted', distored) cv2.waitKey()", "0 # 핀큐션 왜곡 img = cv2.imread('../img/girl.jpg') rows, cols = img.shape[:2] # 매핑", "좌표로 -1~1 정규화 및 극좌표 변환 ---③ mapx = 2*mapx/(cols-1)-1 mapy = 2*mapy/(rows-1)-1", "# 왜곡 계수 설정 ---① k1, k2, k3 = 0.5, 0.2, 0.0 #", "import numpy as np # 왜곡 계수 설정 ---① k1, k2, k3 =", "theta = cv2.cartToPolar(mapx, mapy) # 방사 왜곡 변영 연산 ---④ ru = r*(1+k1*(r**2)", "왜곡 변영 연산 ---④ ru = r*(1+k1*(r**2) + k2*(r**4) + k3*(r**6)) # 직교좌표", "= ((mapy + 1)*rows-1)/2 # 리매핑 ---⑥ distored = cv2.remap(img,mapx,mapy,cv2.INTER_LINEAR) cv2.imshow('original', img) cv2.imshow('distorted',", "= -0.3, 0, 0 # 핀큐션 왜곡 img = cv2.imread('../img/girl.jpg') rows, cols =", "ru = r*(1+k1*(r**2) + k2*(r**4) + k3*(r**6)) # 직교좌표 및 좌상단 기준으로 복원", "설정 ---① k1, k2, k3 = 0.5, 0.2, 0.0 # 배럴 왜곡 #k1,", "mapx = np.indices((rows, cols),dtype=np.float32) # 중앙점 좌표로 -1~1 정규화 및 극좌표 변환 ---③", "numpy as np # 왜곡 계수 설정 ---① k1, k2, k3 = 0.5,", "mapy, mapx = np.indices((rows, cols),dtype=np.float32) # 중앙점 좌표로 -1~1 정규화 및 극좌표 변환", "및 극좌표 변환 ---③ mapx = 2*mapx/(cols-1)-1 mapy = 2*mapy/(rows-1)-1 r, theta =", "np # 왜곡 계수 설정 ---① k1, k2, k3 = 0.5, 0.2, 0.0", "k2, k3 = 0.5, 0.2, 0.0 # 배럴 왜곡 #k1, k2, k3 =", "+ k2*(r**4) + k3*(r**6)) # 직교좌표 및 좌상단 기준으로 복원 ---⑤ mapx, mapy", "mapx = 2*mapx/(cols-1)-1 mapy = 2*mapy/(rows-1)-1 r, theta = cv2.cartToPolar(mapx, mapy) # 방사", "정규화 및 극좌표 변환 ---③ mapx = 2*mapx/(cols-1)-1 mapy = 2*mapy/(rows-1)-1 r, theta", "0.5, 0.2, 0.0 # 배럴 왜곡 #k1, k2, k3 = 
-0.3, 0, 0", "cv2 import numpy as np # 왜곡 계수 설정 ---① k1, k2, k3", "<reponame>dongrami0425/Python_OpenCV-Study import cv2 import numpy as np # 왜곡 계수 설정 ---① k1,", "theta) mapx = ((mapx + 1)*cols-1)/2 mapy = ((mapy + 1)*rows-1)/2 # 리매핑", "및 좌상단 기준으로 복원 ---⑤ mapx, mapy = cv2.polarToCart(ru, theta) mapx = ((mapx", "-0.3, 0, 0 # 핀큐션 왜곡 img = cv2.imread('../img/girl.jpg') rows, cols = img.shape[:2]", "cv2.polarToCart(ru, theta) mapx = ((mapx + 1)*cols-1)/2 mapy = ((mapy + 1)*rows-1)/2 #", "# 매핑 배열 생성 ---② mapy, mapx = np.indices((rows, cols),dtype=np.float32) # 중앙점 좌표로", "-1~1 정규화 및 극좌표 변환 ---③ mapx = 2*mapx/(cols-1)-1 mapy = 2*mapy/(rows-1)-1 r,", "= 2*mapy/(rows-1)-1 r, theta = cv2.cartToPolar(mapx, mapy) # 방사 왜곡 변영 연산 ---④", "기준으로 복원 ---⑤ mapx, mapy = cv2.polarToCart(ru, theta) mapx = ((mapx + 1)*cols-1)/2", "import cv2 import numpy as np # 왜곡 계수 설정 ---① k1, k2,", "좌상단 기준으로 복원 ---⑤ mapx, mapy = cv2.polarToCart(ru, theta) mapx = ((mapx +", "---③ mapx = 2*mapx/(cols-1)-1 mapy = 2*mapy/(rows-1)-1 r, theta = cv2.cartToPolar(mapx, mapy) #", "mapy = cv2.polarToCart(ru, theta) mapx = ((mapx + 1)*cols-1)/2 mapy = ((mapy +", "mapx = ((mapx + 1)*cols-1)/2 mapy = ((mapy + 1)*rows-1)/2 # 리매핑 ---⑥", "mapy) # 방사 왜곡 변영 연산 ---④ ru = r*(1+k1*(r**2) + k2*(r**4) +", "= cv2.cartToPolar(mapx, mapy) # 방사 왜곡 변영 연산 ---④ ru = r*(1+k1*(r**2) +", "= ((mapx + 1)*cols-1)/2 mapy = ((mapy + 1)*rows-1)/2 # 리매핑 ---⑥ distored", "0.2, 0.0 # 배럴 왜곡 #k1, k2, k3 = -0.3, 0, 0 #", "배럴 왜곡 #k1, k2, k3 = -0.3, 0, 0 # 핀큐션 왜곡 img", "# 핀큐션 왜곡 img = cv2.imread('../img/girl.jpg') rows, cols = img.shape[:2] # 매핑 배열", "생성 ---② mapy, mapx = np.indices((rows, cols),dtype=np.float32) # 중앙점 좌표로 -1~1 정규화 및", "r*(1+k1*(r**2) + k2*(r**4) + k3*(r**6)) # 직교좌표 및 좌상단 기준으로 복원 ---⑤ mapx,", "복원 ---⑤ mapx, mapy = cv2.polarToCart(ru, theta) mapx = ((mapx + 1)*cols-1)/2 mapy", "k1, k2, k3 = 0.5, 0.2, 0.0 # 배럴 왜곡 #k1, k2, k3", "((mapx + 1)*cols-1)/2 mapy = ((mapy + 1)*rows-1)/2 # 리매핑 ---⑥ distored =", "연산 ---④ ru = r*(1+k1*(r**2) + 
k2*(r**4) + k3*(r**6)) # 직교좌표 및 좌상단", "= cv2.polarToCart(ru, theta) mapx = ((mapx + 1)*cols-1)/2 mapy = ((mapy + 1)*rows-1)/2", "왜곡 #k1, k2, k3 = -0.3, 0, 0 # 핀큐션 왜곡 img =" ]
[ "SOCK_DGRAM) message = input('Input lowercase sentence:') clientSocket.sendto(message.encode(), (serverName, serverPort)) modifiedMessage, serverAddress = clientSocket.recvfrom(2048)", "serverPort = 12000 clientSocket = socket(AF_INET, SOCK_DGRAM) message = input('Input lowercase sentence:') clientSocket.sendto(message.encode(),", "from socket import * serverName = 'localhost' serverPort = 12000 clientSocket = socket(AF_INET,", "* serverName = 'localhost' serverPort = 12000 clientSocket = socket(AF_INET, SOCK_DGRAM) message =", "'localhost' serverPort = 12000 clientSocket = socket(AF_INET, SOCK_DGRAM) message = input('Input lowercase sentence:')", "= socket(AF_INET, SOCK_DGRAM) message = input('Input lowercase sentence:') clientSocket.sendto(message.encode(), (serverName, serverPort)) modifiedMessage, serverAddress", "message = input('Input lowercase sentence:') clientSocket.sendto(message.encode(), (serverName, serverPort)) modifiedMessage, serverAddress = clientSocket.recvfrom(2048) print(modifiedMessage.decode())", "= 'localhost' serverPort = 12000 clientSocket = socket(AF_INET, SOCK_DGRAM) message = input('Input lowercase", "12000 clientSocket = socket(AF_INET, SOCK_DGRAM) message = input('Input lowercase sentence:') clientSocket.sendto(message.encode(), (serverName, serverPort))", "= input('Input lowercase sentence:') clientSocket.sendto(message.encode(), (serverName, serverPort)) modifiedMessage, serverAddress = clientSocket.recvfrom(2048) print(modifiedMessage.decode()) clientSocket.close()", "socket import * serverName = 'localhost' serverPort = 12000 clientSocket = socket(AF_INET, SOCK_DGRAM)", "serverName = 'localhost' serverPort = 12000 clientSocket = socket(AF_INET, SOCK_DGRAM) message = input('Input", "socket(AF_INET, SOCK_DGRAM) message = input('Input lowercase sentence:') clientSocket.sendto(message.encode(), (serverName, serverPort)) modifiedMessage, serverAddress =", "import * serverName = 'localhost' serverPort = 12000 clientSocket = 
socket(AF_INET, SOCK_DGRAM) message", "clientSocket = socket(AF_INET, SOCK_DGRAM) message = input('Input lowercase sentence:') clientSocket.sendto(message.encode(), (serverName, serverPort)) modifiedMessage,", "= 12000 clientSocket = socket(AF_INET, SOCK_DGRAM) message = input('Input lowercase sentence:') clientSocket.sendto(message.encode(), (serverName," ]
[ "range(7)]) mail_fix = \"@gmail.com\" email = mail_lo + mail_fix return email class TestData:", "= 'Hello, én egy A009 test User vagyok!.' class MyRND(): chars_lo = string.ascii_lowercase", "randomly generated users (arbitrary number of users, default is 2) import random import", "\"\".join([random.choice(cls.chars_int) for _ in range(8)]) pp_up = \"\".join([random.choice(cls.chars_up) for _ in range(8)]) pchars", "for scrolling function test # test with randomly generated users (arbitrary number of", "_ in range(8)]) pchars = pp_lo[4] + pp_int[0] + pp_up[7] + pp_lo[1:3] +", "in range(7)]) mail_fix = \"@gmail.com\" email = mail_lo + mail_fix return email class", "*'[{&| etc @classmethod def uname(cls): return \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) @classmethod def", "= <PASSWORD>() self.data.append(d) # set number of randomly generated users td = TestData(2)", "users = [] for user in td_list: user_data = [] for value in", "users for scrolling function test # test with randomly generated users (arbitrary number", "_ in range(7)]) mail_fix = \"@gmail.com\" email = mail_lo + mail_fix return email", "class MyRND(): chars_lo = string.ascii_lowercase chars_int = string.digits chars_up = string.ascii_uppercase chars =", "@classmethod def uname(cls): return \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) @classmethod def ppass(cls): pp_lo", "mail_lo + mail_fix return email class TestData: def __init__(self, rn): self.data = []", "return email class TestData: def __init__(self, rn): self.data = [] for i in", "= MyRND.uname() d[\"email\"] = MyRND.email() d[\"password\"] = <PASSWORD>() self.data.append(d) # set number of", "pp_int[0] + pp_up[7] + pp_lo[1:3] + pp_int[3] + pp_up[4] + pp_lo[6] return pchars", "_ in range(8)]) pp_up = \"\".join([random.choice(cls.chars_up) for _ in range(8)]) pchars = pp_lo[4]", "enter users for scrolling function test # test with randomly generated users (arbitrary", "test with randomly generated users (arbitrary 
number of users, default is 2) import", "2) import random import string title = 'Hello, én egy A009 test User", "= pp_lo[4] + pp_int[0] + pp_up[7] + pp_lo[1:3] + pp_int[3] + pp_up[4] +", "is 2) import random import string title = 'Hello, én egy A009 test", "i in range(rn): d = {} d[\"username\"] = MyRND.uname() d[\"email\"] = MyRND.email() d[\"password\"]", "number of users, default is 2) import random import string title = 'Hello,", "= string.ascii_uppercase chars = string.punctuation # *'[{&| etc @classmethod def uname(cls): return \"\".join([random.choice(cls.chars_lo)", "etc @classmethod def uname(cls): return \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) @classmethod def ppass(cls):", "in range(8)]) @classmethod def ppass(cls): pp_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) pp_int", "email(cls): mail_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(7)]) mail_fix = \"@gmail.com\" email =", "self.data.append(d) # set number of randomly generated users td = TestData(2) td_list =", "print(td_list) users = [] for user in td_list: user_data = [] for value", "= mail_lo + mail_fix return email class TestData: def __init__(self, rn): self.data =", "range(8)]) pp_int = \"\".join([random.choice(cls.chars_int) for _ in range(8)]) pp_up = \"\".join([random.choice(cls.chars_up) for _", "def __init__(self, rn): self.data = [] for i in range(rn): d = {}", "pp_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) pp_int = \"\".join([random.choice(cls.chars_int) for _ in", "mail_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(7)]) mail_fix = \"@gmail.com\" email = mail_lo", "randomly generated users td = TestData(2) td_list = td.data print(td_list) users = []", "# A009 - enter users for scrolling function test # test with randomly", "users (arbitrary number of users, default is 2) import random import string title", "random import string title = 'Hello, én egy A009 test User vagyok!.' 
class", "[] for i in range(rn): d = {} d[\"username\"] = MyRND.uname() d[\"email\"] =", "+ pp_int[0] + pp_up[7] + pp_lo[1:3] + pp_int[3] + pp_up[4] + pp_lo[6] return", "+ pp_lo[1:3] + pp_int[3] + pp_up[4] + pp_lo[6] return pchars @classmethod def email(cls):", "[] for user in td_list: user_data = [] for value in user.values(): user_data.append(value)", "title = 'Hello, én egy A009 test User vagyok!.' class MyRND(): chars_lo =", "User vagyok!.' class MyRND(): chars_lo = string.ascii_lowercase chars_int = string.digits chars_up = string.ascii_uppercase", "import random import string title = 'Hello, én egy A009 test User vagyok!.'", "= \"\".join([random.choice(cls.chars_up) for _ in range(8)]) pchars = pp_lo[4] + pp_int[0] + pp_up[7]", "user in td_list: user_data = [] for value in user.values(): user_data.append(value) users.append(user_data) print(users)", "@classmethod def ppass(cls): pp_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) pp_int = \"\".join([random.choice(cls.chars_int)", "én egy A009 test User vagyok!.' 
class MyRND(): chars_lo = string.ascii_lowercase chars_int =", "MyRND(): chars_lo = string.ascii_lowercase chars_int = string.digits chars_up = string.ascii_uppercase chars = string.punctuation", "email = mail_lo + mail_fix return email class TestData: def __init__(self, rn): self.data", "for i in range(rn): d = {} d[\"username\"] = MyRND.uname() d[\"email\"] = MyRND.email()", "(arbitrary number of users, default is 2) import random import string title =", "d = {} d[\"username\"] = MyRND.uname() d[\"email\"] = MyRND.email() d[\"password\"] = <PASSWORD>() self.data.append(d)", "pp_up[7] + pp_lo[1:3] + pp_int[3] + pp_up[4] + pp_lo[6] return pchars @classmethod def", "def uname(cls): return \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) @classmethod def ppass(cls): pp_lo =", "td.data print(td_list) users = [] for user in td_list: user_data = [] for", "= [] for user in td_list: user_data = [] for value in user.values():", "number of randomly generated users td = TestData(2) td_list = td.data print(td_list) users", "default is 2) import random import string title = 'Hello, én egy A009", "+ pp_lo[6] return pchars @classmethod def email(cls): mail_lo = \"\".join([random.choice(cls.chars_lo) for _ in", "string.digits chars_up = string.ascii_uppercase chars = string.punctuation # *'[{&| etc @classmethod def uname(cls):", "# *'[{&| etc @classmethod def uname(cls): return \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) @classmethod", "chars_int = string.digits chars_up = string.ascii_uppercase chars = string.punctuation # *'[{&| etc @classmethod", "chars = string.punctuation # *'[{&| etc @classmethod def uname(cls): return \"\".join([random.choice(cls.chars_lo) for _", "# set number of randomly generated users td = TestData(2) td_list = td.data", "= TestData(2) td_list = td.data print(td_list) users = [] for user in td_list:", "pp_int[3] + pp_up[4] + pp_lo[6] return pchars @classmethod def email(cls): mail_lo = \"\".join([random.choice(cls.chars_lo)", 
"pchars = pp_lo[4] + pp_int[0] + pp_up[7] + pp_lo[1:3] + pp_int[3] + pp_up[4]", "egy A009 test User vagyok!.' class MyRND(): chars_lo = string.ascii_lowercase chars_int = string.digits", "\"\".join([random.choice(cls.chars_lo) for _ in range(8)]) @classmethod def ppass(cls): pp_lo = \"\".join([random.choice(cls.chars_lo) for _", "_ in range(8)]) @classmethod def ppass(cls): pp_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(8)])", "@classmethod def email(cls): mail_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(7)]) mail_fix = \"@gmail.com\"", "for _ in range(8)]) pp_up = \"\".join([random.choice(cls.chars_up) for _ in range(8)]) pchars =", "string.punctuation # *'[{&| etc @classmethod def uname(cls): return \"\".join([random.choice(cls.chars_lo) for _ in range(8)])", "d[\"email\"] = MyRND.email() d[\"password\"] = <PASSWORD>() self.data.append(d) # set number of randomly generated", "generated users td = TestData(2) td_list = td.data print(td_list) users = [] for", "= [] for i in range(rn): d = {} d[\"username\"] = MyRND.uname() d[\"email\"]", "= string.digits chars_up = string.ascii_uppercase chars = string.punctuation # *'[{&| etc @classmethod def", "in range(8)]) pp_int = \"\".join([random.choice(cls.chars_int) for _ in range(8)]) pp_up = \"\".join([random.choice(cls.chars_up) for", "of randomly generated users td = TestData(2) td_list = td.data print(td_list) users =", "= \"\".join([random.choice(cls.chars_int) for _ in range(8)]) pp_up = \"\".join([random.choice(cls.chars_up) for _ in range(8)])", "import string title = 'Hello, én egy A009 test User vagyok!.' class MyRND():", "\"@gmail.com\" email = mail_lo + mail_fix return email class TestData: def __init__(self, rn):", "return pchars @classmethod def email(cls): mail_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(7)]) mail_fix", "string title = 'Hello, én egy A009 test User vagyok!.' 
class MyRND(): chars_lo", "self.data = [] for i in range(rn): d = {} d[\"username\"] = MyRND.uname()", "= string.punctuation # *'[{&| etc @classmethod def uname(cls): return \"\".join([random.choice(cls.chars_lo) for _ in", "generated users (arbitrary number of users, default is 2) import random import string", "range(8)]) @classmethod def ppass(cls): pp_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) pp_int =", "ppass(cls): pp_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) pp_int = \"\".join([random.choice(cls.chars_int) for _", "for _ in range(8)]) pp_int = \"\".join([random.choice(cls.chars_int) for _ in range(8)]) pp_up =", "uname(cls): return \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) @classmethod def ppass(cls): pp_lo = \"\".join([random.choice(cls.chars_lo)", "pchars @classmethod def email(cls): mail_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(7)]) mail_fix =", "for user in td_list: user_data = [] for value in user.values(): user_data.append(value) users.append(user_data)", "pp_up[4] + pp_lo[6] return pchars @classmethod def email(cls): mail_lo = \"\".join([random.choice(cls.chars_lo) for _", "with randomly generated users (arbitrary number of users, default is 2) import random", "def email(cls): mail_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(7)]) mail_fix = \"@gmail.com\" email", "pp_lo[4] + pp_int[0] + pp_up[7] + pp_lo[1:3] + pp_int[3] + pp_up[4] + pp_lo[6]", "for _ in range(8)]) pchars = pp_lo[4] + pp_int[0] + pp_up[7] + pp_lo[1:3]", "\"\".join([random.choice(cls.chars_lo) for _ in range(8)]) pp_int = \"\".join([random.choice(cls.chars_int) for _ in range(8)]) pp_up", "d[\"username\"] = MyRND.uname() d[\"email\"] = MyRND.email() d[\"password\"] = <PASSWORD>() self.data.append(d) # set number", "range(rn): d = {} d[\"username\"] = MyRND.uname() d[\"email\"] = MyRND.email() d[\"password\"] = <PASSWORD>()", "class TestData: def __init__(self, rn): self.data = [] for i in 
range(rn): d", "= MyRND.email() d[\"password\"] = <PASSWORD>() self.data.append(d) # set number of randomly generated users", "td = TestData(2) td_list = td.data print(td_list) users = [] for user in", "= \"@gmail.com\" email = mail_lo + mail_fix return email class TestData: def __init__(self,", "pp_int = \"\".join([random.choice(cls.chars_int) for _ in range(8)]) pp_up = \"\".join([random.choice(cls.chars_up) for _ in", "scrolling function test # test with randomly generated users (arbitrary number of users,", "<PASSWORD>() self.data.append(d) # set number of randomly generated users td = TestData(2) td_list", "pp_lo[1:3] + pp_int[3] + pp_up[4] + pp_lo[6] return pchars @classmethod def email(cls): mail_lo", "string.ascii_lowercase chars_int = string.digits chars_up = string.ascii_uppercase chars = string.punctuation # *'[{&| etc", "in range(8)]) pchars = pp_lo[4] + pp_int[0] + pp_up[7] + pp_lo[1:3] + pp_int[3]", "users, default is 2) import random import string title = 'Hello, én egy", "for _ in range(8)]) @classmethod def ppass(cls): pp_lo = \"\".join([random.choice(cls.chars_lo) for _ in", "+ mail_fix return email class TestData: def __init__(self, rn): self.data = [] for", "MyRND.email() d[\"password\"] = <PASSWORD>() self.data.append(d) # set number of randomly generated users td", "td_list = td.data print(td_list) users = [] for user in td_list: user_data =", "= \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) pp_int = \"\".join([random.choice(cls.chars_int) for _ in range(8)])", "set number of randomly generated users td = TestData(2) td_list = td.data print(td_list)", "- enter users for scrolling function test # test with randomly generated users", "vagyok!.' 
class MyRND(): chars_lo = string.ascii_lowercase chars_int = string.digits chars_up = string.ascii_uppercase chars", "= {} d[\"username\"] = MyRND.uname() d[\"email\"] = MyRND.email() d[\"password\"] = <PASSWORD>() self.data.append(d) #", "TestData(2) td_list = td.data print(td_list) users = [] for user in td_list: user_data", "= td.data print(td_list) users = [] for user in td_list: user_data = []", "= \"\".join([random.choice(cls.chars_lo) for _ in range(7)]) mail_fix = \"@gmail.com\" email = mail_lo +", "pp_lo[6] return pchars @classmethod def email(cls): mail_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(7)])", "__init__(self, rn): self.data = [] for i in range(rn): d = {} d[\"username\"]", "A009 - enter users for scrolling function test # test with randomly generated", "test User vagyok!.' class MyRND(): chars_lo = string.ascii_lowercase chars_int = string.digits chars_up =", "+ pp_up[4] + pp_lo[6] return pchars @classmethod def email(cls): mail_lo = \"\".join([random.choice(cls.chars_lo) for", "_ in range(8)]) pp_int = \"\".join([random.choice(cls.chars_int) for _ in range(8)]) pp_up = \"\".join([random.choice(cls.chars_up)", "d[\"password\"] = <PASSWORD>() self.data.append(d) # set number of randomly generated users td =", "def ppass(cls): pp_lo = \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) pp_int = \"\".join([random.choice(cls.chars_int) for", "{} d[\"username\"] = MyRND.uname() d[\"email\"] = MyRND.email() d[\"password\"] = <PASSWORD>() self.data.append(d) # set", "'Hello, én egy A009 test User vagyok!.' 
class MyRND(): chars_lo = string.ascii_lowercase chars_int", "range(8)]) pchars = pp_lo[4] + pp_int[0] + pp_up[7] + pp_lo[1:3] + pp_int[3] +", "for _ in range(7)]) mail_fix = \"@gmail.com\" email = mail_lo + mail_fix return", "\"\".join([random.choice(cls.chars_up) for _ in range(8)]) pchars = pp_lo[4] + pp_int[0] + pp_up[7] +", "pp_up = \"\".join([random.choice(cls.chars_up) for _ in range(8)]) pchars = pp_lo[4] + pp_int[0] +", "+ pp_up[7] + pp_lo[1:3] + pp_int[3] + pp_up[4] + pp_lo[6] return pchars @classmethod", "test # test with randomly generated users (arbitrary number of users, default is", "mail_fix = \"@gmail.com\" email = mail_lo + mail_fix return email class TestData: def", "email class TestData: def __init__(self, rn): self.data = [] for i in range(rn):", "rn): self.data = [] for i in range(rn): d = {} d[\"username\"] =", "chars_up = string.ascii_uppercase chars = string.punctuation # *'[{&| etc @classmethod def uname(cls): return", "\"\".join([random.choice(cls.chars_lo) for _ in range(7)]) mail_fix = \"@gmail.com\" email = mail_lo + mail_fix", "function test # test with randomly generated users (arbitrary number of users, default", "range(8)]) pp_up = \"\".join([random.choice(cls.chars_up) for _ in range(8)]) pchars = pp_lo[4] + pp_int[0]", "MyRND.uname() d[\"email\"] = MyRND.email() d[\"password\"] = <PASSWORD>() self.data.append(d) # set number of randomly", "= string.ascii_lowercase chars_int = string.digits chars_up = string.ascii_uppercase chars = string.punctuation # *'[{&|", "return \"\".join([random.choice(cls.chars_lo) for _ in range(8)]) @classmethod def ppass(cls): pp_lo = \"\".join([random.choice(cls.chars_lo) for", "TestData: def __init__(self, rn): self.data = [] for i in range(rn): d =", "users td = TestData(2) td_list = td.data print(td_list) users = [] for user", "of users, default is 2) import random import string title = 'Hello, én", "A009 test User vagyok!.' 
class MyRND(): chars_lo = string.ascii_lowercase chars_int = string.digits chars_up", "mail_fix return email class TestData: def __init__(self, rn): self.data = [] for i", "in range(8)]) pp_up = \"\".join([random.choice(cls.chars_up) for _ in range(8)]) pchars = pp_lo[4] +", "# test with randomly generated users (arbitrary number of users, default is 2)", "in range(rn): d = {} d[\"username\"] = MyRND.uname() d[\"email\"] = MyRND.email() d[\"password\"] =", "+ pp_int[3] + pp_up[4] + pp_lo[6] return pchars @classmethod def email(cls): mail_lo =", "chars_lo = string.ascii_lowercase chars_int = string.digits chars_up = string.ascii_uppercase chars = string.punctuation #", "string.ascii_uppercase chars = string.punctuation # *'[{&| etc @classmethod def uname(cls): return \"\".join([random.choice(cls.chars_lo) for" ]
[ "len(anomaly_columns), 'please provide anomaly value for each anomaly column indicator' m = len(anomaly_columns)", "if len(x)==10 else None).apply(pd.Series)) nio_df.columns = ['1,000,000,000', '100,000,000', '10,000,000', '1,000,000', '100,000', '10,000', '1,000',", "df_clean = df.dropna(how='all', axis=1) print(f'dropped {df.shape[1] - df_clean.shape[1]} columns') # drop rows with", "color=\"red\", s=10, ax=axs[row][col], label=f'anomaly: {anomaly}', alpha=1) else: fig, axs = plt.subplots(n, 1, figsize=(width,", "figsize=(width*m, height*n)) # reformat axs so it can be subset in the event", "= [axs] for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name)", "{df.shape[1] - df_clean.shape[1]} columns') # drop rows with NA values df_clean = df_clean.dropna(how='any',", "enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series): nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x:", "for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series):", "columns if groupby: plot_df = plot_df.groupby(groupby) else: pass n = len(columns) if anomaly_columns:", "axis=1) print(f'dropped {df.shape[1] - df_clean.shape[1]} columns') # drop rows with NA values df_clean", "pd import numpy as np import seaborn as sns from matplotlib import pyplot", "len(columns) if anomaly_columns: assert len(anomaly_values) == len(anomaly_columns), 'please provide anomaly value for each", "def plot_time_series(df, columns, index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[], groupby=[], width=10, height=5, anomaly_columns=[], anomaly_values=[]): assert", "assert len(anomaly_values) == len(anomaly_columns), 'please provide anomaly 
value for each anomaly column indicator'", "that there's only one row or only one column if n==1: axs=[axs] if", "one column if n==1: axs=[axs] if m==1: axs=[[i] for i in axs] for", "label=f'anomaly: {anomaly}', alpha=1) else: fig, axs = plt.subplots(n, 1, figsize=(width, height*n)) if n", "only one row or only one column if n==1: axs=[axs] if m==1: axs=[[i]", "assert isinstance(columns, list), 'columns should be a list' if index_is_timestamp: plot_df = df.loc[start_date:end_date]", "axs=[[i] for i in axs] for col, anomaly in enumerate(anomaly_columns): for row, column", "the event that there's only one row or only one column if n==1:", "filter specific machine number if ma_nr: plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass # group", "as pd import numpy as np import seaborn as sns from matplotlib import", "plot_time_series(df, columns, index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[], groupby=[], width=10, height=5, anomaly_columns=[], anomaly_values=[]): assert isinstance(ma_nr,", "plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series): nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i", "width=10, height=5, anomaly_columns=[], anomaly_values=[]): assert isinstance(ma_nr, list) , 'ma_nr should be a list'", "row or only one column if n==1: axs=[axs] if m==1: axs=[[i] for i", "row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10,", "df.dropna(how='all', axis=1) print(f'dropped {df.shape[1] - df_clean.shape[1]} columns') # drop rows with NA values", "column if n==1: axs=[axs] if m==1: axs=[[i] for i in axs] for col,", "rows with NA values df_clean = df_clean.dropna(how='any', axis=0) 
print(f'dropped {df.shape[0] - df_clean.shape[0]} rows')", "by columns if groupby: plot_df = plot_df.groupby(groupby) else: pass n = len(columns) if", "= len(columns) if anomaly_columns: assert len(anomaly_values) == len(anomaly_columns), 'please provide anomaly value for", "in enumerate(anomaly_columns): for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]],", "seaborn as sns from matplotlib import pyplot as plt def clean_df(df): # drop", "in the event that there's only one row or only one column if", "ax=axs[row][col], label=f'anomaly: {anomaly}', alpha=1) else: fig, axs = plt.subplots(n, 1, figsize=(width, height*n)) if", "x: [i for i in x] if len(x)==10 else None).apply(pd.Series)) nio_df.columns = ['1,000,000,000',", "if n == 1: axs = [axs] for row, column in enumerate(columns): plot_df[column].plot(legend=True,", "xlabel='', ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series): nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for i", "axs so it can be subset in the event that there's only one", "- df_clean.shape[1]} columns') # drop rows with NA values df_clean = df_clean.dropna(how='any', axis=0)", "each anomaly column indicator' m = len(anomaly_columns) fig, axs = plt.subplots(n, m, figsize=(width*m,", "if index_is_timestamp: plot_df = df.loc[start_date:end_date] else: plot_df = df.set_index('INSDATE').loc[start_date:end_date] # filter specific machine", "axs] for col, anomaly in enumerate(anomaly_columns): for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row][col],", "be subset in the event that there's only one row or only one", "in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series): nio_df = 
pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda", "groupby=[], width=10, height=5, anomaly_columns=[], anomaly_values=[]): assert isinstance(ma_nr, list) , 'ma_nr should be a", ", 'ma_nr should be a list' assert isinstance(groupby, list), 'gropuby should be a", "list' assert isinstance(groupby, list), 'gropuby should be a list' assert isinstance(columns, list), 'columns", "index_is_timestamp: plot_df = df.loc[start_date:end_date] else: plot_df = df.set_index('INSDATE').loc[start_date:end_date] # filter specific machine number", "= df.set_index('INSDATE').loc[start_date:end_date] # filter specific machine number if ma_nr: plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else:", "so it can be subset in the event that there's only one row", "def nio_labels(nio_series): nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for i in x] if len(x)==10", "isinstance(groupby, list), 'gropuby should be a list' assert isinstance(columns, list), 'columns should be", "pyplot as plt def clean_df(df): # drop columns containing only NAs df_clean =", "as sns from matplotlib import pyplot as plt def clean_df(df): # drop columns", "col, anomaly in enumerate(anomaly_columns): for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column,", "df_clean.shape[0]} rows') return df_clean def plot_time_series(df, columns, index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[], groupby=[], width=10,", "n == 1: axs = [axs] for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row],", "axs = [axs] for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column, alpha=0.5)", "len(x)==10 else None).apply(pd.Series)) nio_df.columns = ['1,000,000,000', '100,000,000', '10,000,000', '1,000,000', '100,000', '10,000', '1,000', '100',", "fig, axs = plt.subplots(n, m, figsize=(width*m, height*n)) # reformat axs so it 
can", "[axs] for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name) def", "column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10, ax=axs[row][col],", "else: pass # group by columns if groupby: plot_df = plot_df.groupby(groupby) else: pass", "[i for i in x] if len(x)==10 else None).apply(pd.Series)) nio_df.columns = ['1,000,000,000', '100,000,000',", "indicator' m = len(anomaly_columns) fig, axs = plt.subplots(n, m, figsize=(width*m, height*n)) # reformat", "there's only one row or only one column if n==1: axs=[axs] if m==1:", "matplotlib import pyplot as plt def clean_df(df): # drop columns containing only NAs", "event that there's only one row or only one column if n==1: axs=[axs]", "alpha=0.5) axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series): nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for i in x]", "or only one column if n==1: axs=[axs] if m==1: axs=[[i] for i in", "anomaly value for each anomaly column indicator' m = len(anomaly_columns) fig, axs =", "clean_df(df): # drop columns containing only NAs df_clean = df.dropna(how='all', axis=1) print(f'dropped {df.shape[1]", "only one column if n==1: axs=[axs] if m==1: axs=[[i] for i in axs]", "axs=[axs] if m==1: axs=[[i] for i in axs] for col, anomaly in enumerate(anomaly_columns):", "assert isinstance(groupby, list), 'gropuby should be a list' assert isinstance(columns, list), 'columns should", "subset in the event that there's only one row or only one column", "anomaly_values=[]): assert isinstance(ma_nr, list) , 'ma_nr should be a list' assert isinstance(groupby, list),", "x] if len(x)==10 else None).apply(pd.Series)) nio_df.columns = 
['1,000,000,000', '100,000,000', '10,000,000', '1,000,000', '100,000', '10,000',", "drop columns containing only NAs df_clean = df.dropna(how='all', axis=1) print(f'dropped {df.shape[1] - df_clean.shape[1]}", "axs = plt.subplots(n, m, figsize=(width*m, height*n)) # reformat axs so it can be", "axis=0) print(f'dropped {df.shape[0] - df_clean.shape[0]} rows') return df_clean def plot_time_series(df, columns, index_is_timestamp=False, start_date=None,", "'gropuby should be a list' assert isinstance(columns, list), 'columns should be a list'", "a list' assert isinstance(columns, list), 'columns should be a list' if index_is_timestamp: plot_df", "height*n)) # reformat axs so it can be subset in the event that", "a list' if index_is_timestamp: plot_df = df.loc[start_date:end_date] else: plot_df = df.set_index('INSDATE').loc[start_date:end_date] # filter", "plt.subplots(n, m, figsize=(width*m, height*n)) # reformat axs so it can be subset in", "= plt.subplots(n, m, figsize=(width*m, height*n)) # reformat axs so it can be subset", "be a list' if index_is_timestamp: plot_df = df.loc[start_date:end_date] else: plot_df = df.set_index('INSDATE').loc[start_date:end_date] #", "number if ma_nr: plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass # group by columns if", "# group by columns if groupby: plot_df = plot_df.groupby(groupby) else: pass n =", "= plot_df.groupby(groupby) else: pass n = len(columns) if anomaly_columns: assert len(anomaly_values) == len(anomaly_columns),", "in x] if len(x)==10 else None).apply(pd.Series)) nio_df.columns = ['1,000,000,000', '100,000,000', '10,000,000', '1,000,000', '100,000',", "list), 'gropuby should be a list' assert isinstance(columns, list), 'columns should be a", "assert isinstance(ma_nr, list) , 'ma_nr should be a list' assert isinstance(groupby, list), 'gropuby", "'please provide anomaly value for each anomaly column indicator' m = len(anomaly_columns) fig,", "== len(anomaly_columns), 'please provide anomaly value for each 
anomaly column indicator' m =", "- df_clean.shape[0]} rows') return df_clean def plot_time_series(df, columns, index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[], groupby=[],", "sns from matplotlib import pyplot as plt def clean_df(df): # drop columns containing", "return df_clean def plot_time_series(df, columns, index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[], groupby=[], width=10, height=5, anomaly_columns=[],", "index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[], groupby=[], width=10, height=5, anomaly_columns=[], anomaly_values=[]): assert isinstance(ma_nr, list) ,", "reformat axs so it can be subset in the event that there's only", "df_clean = df_clean.dropna(how='any', axis=0) print(f'dropped {df.shape[0] - df_clean.shape[0]} rows') return df_clean def plot_time_series(df,", "ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10, ax=axs[row][col], label=f'anomaly: {anomaly}', alpha=1) else: fig, axs", "else: fig, axs = plt.subplots(n, 1, figsize=(width, height*n)) if n == 1: axs", "specific machine number if ma_nr: plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass # group by", "plot_df = df.loc[start_date:end_date] else: plot_df = df.set_index('INSDATE').loc[start_date:end_date] # filter specific machine number if", "# reformat axs so it can be subset in the event that there's", "1, figsize=(width, height*n)) if n == 1: axs = [axs] for row, column", "import pandas as pd import numpy as np import seaborn as sns from", "anomaly column indicator' m = len(anomaly_columns) fig, axs = plt.subplots(n, m, figsize=(width*m, height*n))", "# drop rows with NA values df_clean = df_clean.dropna(how='any', axis=0) print(f'dropped {df.shape[0] -", "pass n = len(columns) if anomaly_columns: assert len(anomaly_values) == len(anomaly_columns), 'please provide anomaly", "enumerate(columns): 
plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10, ax=axs[row][col], label=f'anomaly: {anomaly}',", "if m==1: axs=[[i] for i in axs] for col, anomaly in enumerate(anomaly_columns): for", "i in x] if len(x)==10 else None).apply(pd.Series)) nio_df.columns = ['1,000,000,000', '100,000,000', '10,000,000', '1,000,000',", "ma_nr: plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass # group by columns if groupby: plot_df", "df.loc[start_date:end_date] else: plot_df = df.set_index('INSDATE').loc[start_date:end_date] # filter specific machine number if ma_nr: plot_df", "if ma_nr: plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass # group by columns if groupby:", "a list' assert isinstance(groupby, list), 'gropuby should be a list' assert isinstance(columns, list),", "<filename>notebooks/MS_Functions.py import pandas as pd import numpy as np import seaborn as sns", "ma_nr=[], groupby=[], width=10, height=5, anomaly_columns=[], anomaly_values=[]): assert isinstance(ma_nr, list) , 'ma_nr should be", "numpy as np import seaborn as sns from matplotlib import pyplot as plt", "as plt def clean_df(df): # drop columns containing only NAs df_clean = df.dropna(how='all',", "plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass # group by columns if groupby: plot_df =", "it can be subset in the event that there's only one row or", "i in axs] for col, anomaly in enumerate(anomaly_columns): for row, column in enumerate(columns):", "else: pass n = len(columns) if anomaly_columns: assert len(anomaly_values) == len(anomaly_columns), 'please provide", "fig, axs = plt.subplots(n, 1, figsize=(width, height*n)) if n == 1: axs =", "for i in x] if len(x)==10 else None).apply(pd.Series)) nio_df.columns = ['1,000,000,000', '100,000,000', '10,000,000',", "nio_df.columns = 
['1,000,000,000', '100,000,000', '10,000,000', '1,000,000', '100,000', '10,000', '1,000', '100', '10', '1'] return", "list' assert isinstance(columns, list), 'columns should be a list' if index_is_timestamp: plot_df =", "if n==1: axs=[axs] if m==1: axs=[[i] for i in axs] for col, anomaly", "start_date=None, end_date=None, ma_nr=[], groupby=[], width=10, height=5, anomaly_columns=[], anomaly_values=[]): assert isinstance(ma_nr, list) , 'ma_nr", "isinstance(ma_nr, list) , 'ma_nr should be a list' assert isinstance(groupby, list), 'gropuby should", "pass # group by columns if groupby: plot_df = plot_df.groupby(groupby) else: pass n", "figsize=(width, height*n)) if n == 1: axs = [axs] for row, column in", "anomaly_columns: assert len(anomaly_values) == len(anomaly_columns), 'please provide anomaly value for each anomaly column", "alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10, ax=axs[row][col], label=f'anomaly: {anomaly}', alpha=1) else: fig, axs =", "ax=axs[row], xlabel='', ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series): nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for", "None).apply(pd.Series)) nio_df.columns = ['1,000,000,000', '100,000,000', '10,000,000', '1,000,000', '100,000', '10,000', '1,000', '100', '10', '1']", "one row or only one column if n==1: axs=[axs] if m==1: axs=[[i] for", "groupby: plot_df = plot_df.groupby(groupby) else: pass n = len(columns) if anomaly_columns: assert len(anomaly_values)", "else: plot_df = df.set_index('INSDATE').loc[start_date:end_date] # filter specific machine number if ma_nr: plot_df =", "len(anomaly_columns) fig, axs = plt.subplots(n, m, figsize=(width*m, height*n)) # reformat axs so it", "s=10, ax=axs[row][col], label=f'anomaly: {anomaly}', alpha=1) else: fig, axs = plt.subplots(n, 1, figsize=(width, height*n))", "list) , 'ma_nr 
should be a list' assert isinstance(groupby, list), 'gropuby should be", "rows') return df_clean def plot_time_series(df, columns, index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[], groupby=[], width=10, height=5,", "nio_labels(nio_series): nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for i in x] if len(x)==10 else", "columns, index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[], groupby=[], width=10, height=5, anomaly_columns=[], anomaly_values=[]): assert isinstance(ma_nr, list)", "def clean_df(df): # drop columns containing only NAs df_clean = df.dropna(how='all', axis=1) print(f'dropped", "= len(anomaly_columns) fig, axs = plt.subplots(n, m, figsize=(width*m, height*n)) # reformat axs so", "axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series): nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for i in x] if", "list' if index_is_timestamp: plot_df = df.loc[start_date:end_date] else: plot_df = df.set_index('INSDATE').loc[start_date:end_date] # filter specific", "= pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for i in x] if len(x)==10 else None).apply(pd.Series)) nio_df.columns", "df_clean.shape[1]} columns') # drop rows with NA values df_clean = df_clean.dropna(how='any', axis=0) print(f'dropped", "plt def clean_df(df): # drop columns containing only NAs df_clean = df.dropna(how='all', axis=1)", "= df.loc[start_date:end_date] else: plot_df = df.set_index('INSDATE').loc[start_date:end_date] # filter specific machine number if ma_nr:", "# filter specific machine number if ma_nr: plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass #", "column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series): nio_df =", "isinstance(columns, list), 'columns should be a list' if index_is_timestamp: plot_df = 
df.loc[start_date:end_date] else:", "plt.subplots(n, 1, figsize=(width, height*n)) if n == 1: axs = [axs] for row,", "with NA values df_clean = df_clean.dropna(how='any', axis=0) print(f'dropped {df.shape[0] - df_clean.shape[0]} rows') return", "row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series): nio_df", "be a list' assert isinstance(groupby, list), 'gropuby should be a list' assert isinstance(columns,", "import pyplot as plt def clean_df(df): # drop columns containing only NAs df_clean", "= plt.subplots(n, 1, figsize=(width, height*n)) if n == 1: axs = [axs] for", "provide anomaly value for each anomaly column indicator' m = len(anomaly_columns) fig, axs", "end_date=None, ma_nr=[], groupby=[], width=10, height=5, anomaly_columns=[], anomaly_values=[]): assert isinstance(ma_nr, list) , 'ma_nr should", "df_clean.dropna(how='any', axis=0) print(f'dropped {df.shape[0] - df_clean.shape[0]} rows') return df_clean def plot_time_series(df, columns, index_is_timestamp=False,", "{anomaly}', alpha=1) else: fig, axs = plt.subplots(n, 1, figsize=(width, height*n)) if n ==", "machine number if ma_nr: plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass # group by columns", "1: axs = [axs] for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row], xlabel='', ylabel=column,", "anomaly_columns=[], anomaly_values=[]): assert isinstance(ma_nr, list) , 'ma_nr should be a list' assert isinstance(groupby,", "plot_df = plot_df.groupby(groupby) else: pass n = len(columns) if anomaly_columns: assert len(anomaly_values) ==", "group by columns if groupby: plot_df = plot_df.groupby(groupby) else: pass n = len(columns)", "= df_clean.dropna(how='any', axis=0) print(f'dropped {df.shape[0] - df_clean.shape[0]} rows') return df_clean def plot_time_series(df, columns,", "should be a list' if index_is_timestamp: plot_df = 
df.loc[start_date:end_date] else: plot_df = df.set_index('INSDATE').loc[start_date:end_date]", "in axs] for col, anomaly in enumerate(anomaly_columns): for row, column in enumerate(columns): plot_df[column].plot(legend=True,", "columns') # drop rows with NA values df_clean = df_clean.dropna(how='any', axis=0) print(f'dropped {df.shape[0]", "if groupby: plot_df = plot_df.groupby(groupby) else: pass n = len(columns) if anomaly_columns: assert", "{df.shape[0] - df_clean.shape[0]} rows') return df_clean def plot_time_series(df, columns, index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[],", "axs = plt.subplots(n, 1, figsize=(width, height*n)) if n == 1: axs = [axs]", "print(f'dropped {df.shape[1] - df_clean.shape[1]} columns') # drop rows with NA values df_clean =", "np import seaborn as sns from matplotlib import pyplot as plt def clean_df(df):", "m==1: axs=[[i] for i in axs] for col, anomaly in enumerate(anomaly_columns): for row,", "ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10, ax=axs[row][col], label=f'anomaly: {anomaly}', alpha=1) else:", "height*n)) if n == 1: axs = [axs] for row, column in enumerate(columns):", "xlabel='', ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10, ax=axs[row][col], label=f'anomaly: {anomaly}', alpha=1) else: fig,", "column indicator' m = len(anomaly_columns) fig, axs = plt.subplots(n, m, figsize=(width*m, height*n)) #", "= ['1,000,000,000', '100,000,000', '10,000,000', '1,000,000', '100,000', '10,000', '1,000', '100', '10', '1'] return nio_df", "for i in axs] for col, anomaly in enumerate(anomaly_columns): for row, column in", "n = len(columns) if anomaly_columns: assert len(anomaly_values) == len(anomaly_columns), 'please provide 
anomaly value", "pandas as pd import numpy as np import seaborn as sns from matplotlib", "can be subset in the event that there's only one row or only", "alpha=1) else: fig, axs = plt.subplots(n, 1, figsize=(width, height*n)) if n == 1:", "enumerate(anomaly_columns): for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]],", "NA values df_clean = df_clean.dropna(how='any', axis=0) print(f'dropped {df.shape[0] - df_clean.shape[0]} rows') return df_clean", "= plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass # group by columns if groupby: plot_df = plot_df.groupby(groupby)", "nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for i in x] if len(x)==10 else None).apply(pd.Series))", "y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10, ax=axs[row][col], label=f'anomaly: {anomaly}', alpha=1) else: fig, axs = plt.subplots(n, 1,", "value for each anomaly column indicator' m = len(anomaly_columns) fig, axs = plt.subplots(n,", "'columns should be a list' if index_is_timestamp: plot_df = df.loc[start_date:end_date] else: plot_df =", "only NAs df_clean = df.dropna(how='all', axis=1) print(f'dropped {df.shape[1] - df_clean.shape[1]} columns') # drop", "be a list' assert isinstance(columns, list), 'columns should be a list' if index_is_timestamp:", "import seaborn as sns from matplotlib import pyplot as plt def clean_df(df): #", "len(anomaly_values) == len(anomaly_columns), 'please provide anomaly value for each anomaly column indicator' m", "m = len(anomaly_columns) fig, axs = plt.subplots(n, m, figsize=(width*m, height*n)) # reformat axs", "sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10, ax=axs[row][col], 
label=f'anomaly: {anomaly}', alpha=1) else: fig, axs = plt.subplots(n,", "df.set_index('INSDATE').loc[start_date:end_date] # filter specific machine number if ma_nr: plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass", "containing only NAs df_clean = df.dropna(how='all', axis=1) print(f'dropped {df.shape[1] - df_clean.shape[1]} columns') #", "for each anomaly column indicator' m = len(anomaly_columns) fig, axs = plt.subplots(n, m,", "should be a list' assert isinstance(columns, list), 'columns should be a list' if", "'ma_nr should be a list' assert isinstance(groupby, list), 'gropuby should be a list'", "plot_df.groupby(groupby) else: pass n = len(columns) if anomaly_columns: assert len(anomaly_values) == len(anomaly_columns), 'please", "list), 'columns should be a list' if index_is_timestamp: plot_df = df.loc[start_date:end_date] else: plot_df", "plot_df.loc[plot_df['MA_NR'].isin(ma_nr)] else: pass # group by columns if groupby: plot_df = plot_df.groupby(groupby) else:", "pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for i in x] if len(x)==10 else None).apply(pd.Series)) nio_df.columns =", "= df.dropna(how='all', axis=1) print(f'dropped {df.shape[1] - df_clean.shape[1]} columns') # drop rows with NA", "NAs df_clean = df.dropna(how='all', axis=1) print(f'dropped {df.shape[1] - df_clean.shape[1]} columns') # drop rows", "print(f'dropped {df.shape[0] - df_clean.shape[0]} rows') return df_clean def plot_time_series(df, columns, index_is_timestamp=False, start_date=None, end_date=None,", "# drop columns containing only NAs df_clean = df.dropna(how='all', axis=1) print(f'dropped {df.shape[1] -", "else None).apply(pd.Series)) nio_df.columns = ['1,000,000,000', '100,000,000', '10,000,000', '1,000,000', '100,000', '10,000', '1,000', '100', '10',", "== 1: axs = [axs] for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row], xlabel='',", "should be a list' assert isinstance(groupby, list), 'gropuby should be 
a list' assert", "in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10, ax=axs[row][col], label=f'anomaly:", "n==1: axs=[axs] if m==1: axs=[[i] for i in axs] for col, anomaly in", "plot_df = df.set_index('INSDATE').loc[start_date:end_date] # filter specific machine number if ma_nr: plot_df = plot_df.loc[plot_df['MA_NR'].isin(ma_nr)]", "if anomaly_columns: assert len(anomaly_values) == len(anomaly_columns), 'please provide anomaly value for each anomaly", "ylabel=column, alpha=0.5) axs[n-1].set_xlabel(df.index.name) def nio_labels(nio_series): nio_df = pd.DataFrame(nio_series.astype(str).str.rjust(10,'0').apply(lambda x: [i for i in", "drop rows with NA values df_clean = df_clean.dropna(how='any', axis=0) print(f'dropped {df.shape[0] - df_clean.shape[0]}", "for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\",", "values df_clean = df_clean.dropna(how='any', axis=0) print(f'dropped {df.shape[0] - df_clean.shape[0]} rows') return df_clean def", "plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5) sns.scatterplot(x=plot_df.index[plot_df[anomaly]==anomaly_values[col]], y=plot_df[column].loc[plot_df[anomaly]==anomaly_values[col]], color=\"red\", s=10, ax=axs[row][col], label=f'anomaly: {anomaly}', alpha=1)", "for col, anomaly in enumerate(anomaly_columns): for row, column in enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='',", "import numpy as np import seaborn as sns from matplotlib import pyplot as", "anomaly in enumerate(anomaly_columns): for row, column in 
enumerate(columns): plot_df[column].plot(legend=True, ax=axs[row][col], xlabel='', ylabel=column, alpha=0.5)", "from matplotlib import pyplot as plt def clean_df(df): # drop columns containing only", "m, figsize=(width*m, height*n)) # reformat axs so it can be subset in the", "as np import seaborn as sns from matplotlib import pyplot as plt def", "df_clean def plot_time_series(df, columns, index_is_timestamp=False, start_date=None, end_date=None, ma_nr=[], groupby=[], width=10, height=5, anomaly_columns=[], anomaly_values=[]):", "columns containing only NAs df_clean = df.dropna(how='all', axis=1) print(f'dropped {df.shape[1] - df_clean.shape[1]} columns')", "height=5, anomaly_columns=[], anomaly_values=[]): assert isinstance(ma_nr, list) , 'ma_nr should be a list' assert" ]
[ "True subparsers.dest = \"command\" return subparsers def create_parser() -> ArgParser: \"\"\" Create argument", "argparse.ArgumentParser object \"\"\" parser = ArgParser(description=\"Steampunk Scanner - a quality scanner for Ansible", "subparsers.dest = \"command\" return subparsers def create_parser() -> ArgParser: \"\"\" Create argument parser", "{}\\n\".format(message)) self.print_help() sys.exit(2) def add_subparsers(self, **kwargs) -> argparse._SubParsersAction: \"\"\" Overridden the original add_subparsers", "self).add_subparsers() subparsers.required = True subparsers.dest = \"command\" return subparsers def create_parser() -> ArgParser:", "method :param message: Error message \"\"\" sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help() sys.exit(2) def add_subparsers(self, **kwargs)", "self.print_help() sys.exit(2) def add_subparsers(self, **kwargs) -> argparse._SubParsersAction: \"\"\" Overridden the original add_subparsers method", "error method :param message: Error message \"\"\" sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help() sys.exit(2) def add_subparsers(self,", "that displays help on error\"\"\" def error(self, message: str): \"\"\" Overridden the original", "(workaround for http://bugs.python.org/issue9253) \"\"\" subparsers = super(ArgParser, self).add_subparsers() subparsers.required = True subparsers.dest =", "method (workaround for http://bugs.python.org/issue9253) \"\"\" subparsers = super(ArgParser, self).add_subparsers() subparsers.required = True subparsers.dest", "help on error\"\"\" def error(self, message: str): \"\"\" Overridden the original error method", "sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help() sys.exit(2) def add_subparsers(self, **kwargs) -> argparse._SubParsersAction: \"\"\" Overridden the original", "from steampunk_scanner import commands class ArgParser(argparse.ArgumentParser): \"\"\"An argument parser that displays help on", "CLI method to be called \"\"\" 
parser = create_parser() args = parser.parse_args() return", "steampunk_scanner import commands class ArgParser(argparse.ArgumentParser): \"\"\"An argument parser that displays help on error\"\"\"", "inspect import sys from steampunk_scanner import commands class ArgParser(argparse.ArgumentParser): \"\"\"An argument parser that", "subparsers.required = True subparsers.dest = \"command\" return subparsers def create_parser() -> ArgParser: \"\"\"", "ArgParser(description=\"Steampunk Scanner - a quality scanner for Ansible Playbooks\") subparsers = parser.add_subparsers() cmds", "create_parser() -> ArgParser: \"\"\" Create argument parser for CLI :return: Parser as argparse.ArgumentParser", "def main() -> ArgParser: \"\"\" Main CLI method to be called \"\"\" parser", "= \"command\" return subparsers def create_parser() -> ArgParser: \"\"\" Create argument parser for", "import argparse import inspect import sys from steampunk_scanner import commands class ArgParser(argparse.ArgumentParser): \"\"\"An", "x[0]): module.add_parser(subparsers) return parser def main() -> ArgParser: \"\"\" Main CLI method to", "the original add_subparsers method (workaround for http://bugs.python.org/issue9253) \"\"\" subparsers = super(ArgParser, self).add_subparsers() subparsers.required", "-> ArgParser: \"\"\" Create argument parser for CLI :return: Parser as argparse.ArgumentParser object", "\"\"\" Overridden the original error method :param message: Error message \"\"\" sys.stderr.write(\"error: {}\\n\".format(message))", "parser.add_subparsers() cmds = inspect.getmembers(commands, inspect.ismodule) for _, module in sorted(cmds, key=lambda x: x[0]):", "cmds = inspect.getmembers(commands, inspect.ismodule) for _, module in sorted(cmds, key=lambda x: x[0]): module.add_parser(subparsers)", "def add_subparsers(self, **kwargs) -> argparse._SubParsersAction: \"\"\" Overridden the original add_subparsers method (workaround for", "for _, module in sorted(cmds, key=lambda x: x[0]): 
module.add_parser(subparsers) return parser def main()", "\"\"\" subparsers = super(ArgParser, self).add_subparsers() subparsers.required = True subparsers.dest = \"command\" return subparsers", "module.add_parser(subparsers) return parser def main() -> ArgParser: \"\"\" Main CLI method to be", "as argparse.ArgumentParser object \"\"\" parser = ArgParser(description=\"Steampunk Scanner - a quality scanner for", "Main CLI method to be called \"\"\" parser = create_parser() args = parser.parse_args()", "return subparsers def create_parser() -> ArgParser: \"\"\" Create argument parser for CLI :return:", "**kwargs) -> argparse._SubParsersAction: \"\"\" Overridden the original add_subparsers method (workaround for http://bugs.python.org/issue9253) \"\"\"", ":return: Parser as argparse.ArgumentParser object \"\"\" parser = ArgParser(description=\"Steampunk Scanner - a quality", "super(ArgParser, self).add_subparsers() subparsers.required = True subparsers.dest = \"command\" return subparsers def create_parser() ->", "http://bugs.python.org/issue9253) \"\"\" subparsers = super(ArgParser, self).add_subparsers() subparsers.required = True subparsers.dest = \"command\" return", "Ansible Playbooks\") subparsers = parser.add_subparsers() cmds = inspect.getmembers(commands, inspect.ismodule) for _, module in", "message: Error message \"\"\" sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help() sys.exit(2) def add_subparsers(self, **kwargs) -> argparse._SubParsersAction:", "\"\"\"An argument parser that displays help on error\"\"\" def error(self, message: str): \"\"\"", "def create_parser() -> ArgParser: \"\"\" Create argument parser for CLI :return: Parser as", "Overridden the original add_subparsers method (workaround for http://bugs.python.org/issue9253) \"\"\" subparsers = super(ArgParser, self).add_subparsers()", "commands class ArgParser(argparse.ArgumentParser): \"\"\"An argument parser that displays help on error\"\"\" def error(self,", "original 
add_subparsers method (workaround for http://bugs.python.org/issue9253) \"\"\" subparsers = super(ArgParser, self).add_subparsers() subparsers.required =", "Playbooks\") subparsers = parser.add_subparsers() cmds = inspect.getmembers(commands, inspect.ismodule) for _, module in sorted(cmds,", "subparsers def create_parser() -> ArgParser: \"\"\" Create argument parser for CLI :return: Parser", "a quality scanner for Ansible Playbooks\") subparsers = parser.add_subparsers() cmds = inspect.getmembers(commands, inspect.ismodule)", "error(self, message: str): \"\"\" Overridden the original error method :param message: Error message", "= True subparsers.dest = \"command\" return subparsers def create_parser() -> ArgParser: \"\"\" Create", "\"\"\" parser = ArgParser(description=\"Steampunk Scanner - a quality scanner for Ansible Playbooks\") subparsers", "method to be called \"\"\" parser = create_parser() args = parser.parse_args() return args.func(args)", "inspect.ismodule) for _, module in sorted(cmds, key=lambda x: x[0]): module.add_parser(subparsers) return parser def", "sorted(cmds, key=lambda x: x[0]): module.add_parser(subparsers) return parser def main() -> ArgParser: \"\"\" Main", "= inspect.getmembers(commands, inspect.ismodule) for _, module in sorted(cmds, key=lambda x: x[0]): module.add_parser(subparsers) return", "displays help on error\"\"\" def error(self, message: str): \"\"\" Overridden the original error", "CLI :return: Parser as argparse.ArgumentParser object \"\"\" parser = ArgParser(description=\"Steampunk Scanner - a", "sys.exit(2) def add_subparsers(self, **kwargs) -> argparse._SubParsersAction: \"\"\" Overridden the original add_subparsers method (workaround", "argument parser that displays help on error\"\"\" def error(self, message: str): \"\"\" Overridden", "\"\"\" Create argument parser for CLI :return: Parser as argparse.ArgumentParser object \"\"\" parser", "message: str): \"\"\" Overridden the original error method :param message: Error 
message \"\"\"", "add_subparsers(self, **kwargs) -> argparse._SubParsersAction: \"\"\" Overridden the original add_subparsers method (workaround for http://bugs.python.org/issue9253)", "str): \"\"\" Overridden the original error method :param message: Error message \"\"\" sys.stderr.write(\"error:", "quality scanner for Ansible Playbooks\") subparsers = parser.add_subparsers() cmds = inspect.getmembers(commands, inspect.ismodule) for", "for http://bugs.python.org/issue9253) \"\"\" subparsers = super(ArgParser, self).add_subparsers() subparsers.required = True subparsers.dest = \"command\"", "\"command\" return subparsers def create_parser() -> ArgParser: \"\"\" Create argument parser for CLI", ":param message: Error message \"\"\" sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help() sys.exit(2) def add_subparsers(self, **kwargs) ->", "= ArgParser(description=\"Steampunk Scanner - a quality scanner for Ansible Playbooks\") subparsers = parser.add_subparsers()", "Scanner - a quality scanner for Ansible Playbooks\") subparsers = parser.add_subparsers() cmds =", "on error\"\"\" def error(self, message: str): \"\"\" Overridden the original error method :param", "_, module in sorted(cmds, key=lambda x: x[0]): module.add_parser(subparsers) return parser def main() ->", "def error(self, message: str): \"\"\" Overridden the original error method :param message: Error", "ArgParser: \"\"\" Main CLI method to be called \"\"\" parser = create_parser() args", "return parser def main() -> ArgParser: \"\"\" Main CLI method to be called", "parser that displays help on error\"\"\" def error(self, message: str): \"\"\" Overridden the", "-> ArgParser: \"\"\" Main CLI method to be called \"\"\" parser = create_parser()", "\"\"\" Main CLI method to be called \"\"\" parser = create_parser() args =", "error\"\"\" def error(self, message: str): \"\"\" Overridden the original error method :param message:", "original error method :param message: Error message \"\"\" 
sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help() sys.exit(2) def", "- a quality scanner for Ansible Playbooks\") subparsers = parser.add_subparsers() cmds = inspect.getmembers(commands,", "= parser.add_subparsers() cmds = inspect.getmembers(commands, inspect.ismodule) for _, module in sorted(cmds, key=lambda x:", "import inspect import sys from steampunk_scanner import commands class ArgParser(argparse.ArgumentParser): \"\"\"An argument parser", "argument parser for CLI :return: Parser as argparse.ArgumentParser object \"\"\" parser = ArgParser(description=\"Steampunk", "\"\"\" Overridden the original add_subparsers method (workaround for http://bugs.python.org/issue9253) \"\"\" subparsers = super(ArgParser,", "ArgParser(argparse.ArgumentParser): \"\"\"An argument parser that displays help on error\"\"\" def error(self, message: str):", "parser = ArgParser(description=\"Steampunk Scanner - a quality scanner for Ansible Playbooks\") subparsers =", "parser def main() -> ArgParser: \"\"\" Main CLI method to be called \"\"\"", "argparse._SubParsersAction: \"\"\" Overridden the original add_subparsers method (workaround for http://bugs.python.org/issue9253) \"\"\" subparsers =", "Create argument parser for CLI :return: Parser as argparse.ArgumentParser object \"\"\" parser =", "import sys from steampunk_scanner import commands class ArgParser(argparse.ArgumentParser): \"\"\"An argument parser that displays", "in sorted(cmds, key=lambda x: x[0]): module.add_parser(subparsers) return parser def main() -> ArgParser: \"\"\"", "x: x[0]): module.add_parser(subparsers) return parser def main() -> ArgParser: \"\"\" Main CLI method", "parser for CLI :return: Parser as argparse.ArgumentParser object \"\"\" parser = ArgParser(description=\"Steampunk Scanner", "<gh_stars>1-10 import argparse import inspect import sys from steampunk_scanner import commands class ArgParser(argparse.ArgumentParser):", "class ArgParser(argparse.ArgumentParser): \"\"\"An argument 
parser that displays help on error\"\"\" def error(self, message:", "Overridden the original error method :param message: Error message \"\"\" sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help()", "for CLI :return: Parser as argparse.ArgumentParser object \"\"\" parser = ArgParser(description=\"Steampunk Scanner -", "sys from steampunk_scanner import commands class ArgParser(argparse.ArgumentParser): \"\"\"An argument parser that displays help", "module in sorted(cmds, key=lambda x: x[0]): module.add_parser(subparsers) return parser def main() -> ArgParser:", "add_subparsers method (workaround for http://bugs.python.org/issue9253) \"\"\" subparsers = super(ArgParser, self).add_subparsers() subparsers.required = True", "subparsers = super(ArgParser, self).add_subparsers() subparsers.required = True subparsers.dest = \"command\" return subparsers def", "subparsers = parser.add_subparsers() cmds = inspect.getmembers(commands, inspect.ismodule) for _, module in sorted(cmds, key=lambda", "argparse import inspect import sys from steampunk_scanner import commands class ArgParser(argparse.ArgumentParser): \"\"\"An argument", "import commands class ArgParser(argparse.ArgumentParser): \"\"\"An argument parser that displays help on error\"\"\" def", "main() -> ArgParser: \"\"\" Main CLI method to be called \"\"\" parser =", "\"\"\" sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help() sys.exit(2) def add_subparsers(self, **kwargs) -> argparse._SubParsersAction: \"\"\" Overridden the", "ArgParser: \"\"\" Create argument parser for CLI :return: Parser as argparse.ArgumentParser object \"\"\"", "key=lambda x: x[0]): module.add_parser(subparsers) return parser def main() -> ArgParser: \"\"\" Main CLI", "Parser as argparse.ArgumentParser object \"\"\" parser = ArgParser(description=\"Steampunk Scanner - a quality scanner", "scanner for Ansible Playbooks\") subparsers = parser.add_subparsers() cmds = inspect.getmembers(commands, inspect.ismodule) for 
_,", "inspect.getmembers(commands, inspect.ismodule) for _, module in sorted(cmds, key=lambda x: x[0]): module.add_parser(subparsers) return parser", "-> argparse._SubParsersAction: \"\"\" Overridden the original add_subparsers method (workaround for http://bugs.python.org/issue9253) \"\"\" subparsers", "= super(ArgParser, self).add_subparsers() subparsers.required = True subparsers.dest = \"command\" return subparsers def create_parser()", "object \"\"\" parser = ArgParser(description=\"Steampunk Scanner - a quality scanner for Ansible Playbooks\")", "Error message \"\"\" sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help() sys.exit(2) def add_subparsers(self, **kwargs) -> argparse._SubParsersAction: \"\"\"", "the original error method :param message: Error message \"\"\" sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help() sys.exit(2)", "message \"\"\" sys.stderr.write(\"error: {}\\n\".format(message)) self.print_help() sys.exit(2) def add_subparsers(self, **kwargs) -> argparse._SubParsersAction: \"\"\" Overridden", "for Ansible Playbooks\") subparsers = parser.add_subparsers() cmds = inspect.getmembers(commands, inspect.ismodule) for _, module" ]
[ "SET LOGGER c_logger.configure_logger() logger = logging.getLogger(__name__) def lambda_handler(event: Dict, context: Dict) -> None:", "\"\"\" AWS lambda start \"\"\" # CHANGE LOGGER logger = logging.getLogger() logger.setLevel(settings.LOGGER_LEVEL) logger.debug(json.dumps(event,", "sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils import logger as c_logger from forward_recieved_email.config import settings from forward_recieved_email", "import logger as c_logger from forward_recieved_email.config import settings from forward_recieved_email import processing #", "from forward_recieved_email.utils import logger as c_logger from forward_recieved_email.config import settings from forward_recieved_email import", "Dict) -> None: \"\"\" AWS lambda start \"\"\" # CHANGE LOGGER logger =", "logger as c_logger from forward_recieved_email.config import settings from forward_recieved_email import processing # SET", "forward_recieved_email import processing # SET LOGGER c_logger.configure_logger() logger = logging.getLogger(__name__) def lambda_handler(event: Dict,", "c_logger from forward_recieved_email.config import settings from forward_recieved_email import processing # SET LOGGER c_logger.configure_logger()", "forward_recieved_email.utils import logger as c_logger from forward_recieved_email.config import settings from forward_recieved_email import processing", "lambda_handler(event: Dict, context: Dict) -> None: \"\"\" AWS lambda start \"\"\" # CHANGE", "-> None: \"\"\" AWS lambda start \"\"\" # CHANGE LOGGER logger = logging.getLogger()", "None: \"\"\" AWS lambda start \"\"\" # CHANGE LOGGER logger = logging.getLogger() logger.setLevel(settings.LOGGER_LEVEL)", "CHANGE LOGGER logger = logging.getLogger() logger.setLevel(settings.LOGGER_LEVEL) logger.debug(json.dumps(event, indent=4)) result = processing.main_handler(event) return result", "as c_logger from forward_recieved_email.config import settings from 
forward_recieved_email import processing # SET LOGGER", "json import logging from typing import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils import logger as", "os import json import logging from typing import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils import", "\"\"\" # CHANGE LOGGER logger = logging.getLogger() logger.setLevel(settings.LOGGER_LEVEL) logger.debug(json.dumps(event, indent=4)) result = processing.main_handler(event)", "<gh_stars>0 import sys import os import json import logging from typing import Dict", "Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils import logger as c_logger from forward_recieved_email.config import settings from", "import settings from forward_recieved_email import processing # SET LOGGER c_logger.configure_logger() logger = logging.getLogger(__name__)", "def lambda_handler(event: Dict, context: Dict) -> None: \"\"\" AWS lambda start \"\"\" #", "context: Dict) -> None: \"\"\" AWS lambda start \"\"\" # CHANGE LOGGER logger", "processing # SET LOGGER c_logger.configure_logger() logger = logging.getLogger(__name__) def lambda_handler(event: Dict, context: Dict)", "import json import logging from typing import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils import logger", "LOGGER c_logger.configure_logger() logger = logging.getLogger(__name__) def lambda_handler(event: Dict, context: Dict) -> None: \"\"\"", "= logging.getLogger(__name__) def lambda_handler(event: Dict, context: Dict) -> None: \"\"\" AWS lambda start", "logger = logging.getLogger(__name__) def lambda_handler(event: Dict, context: Dict) -> None: \"\"\" AWS lambda", "logging.getLogger(__name__) def lambda_handler(event: Dict, context: Dict) -> None: \"\"\" AWS lambda start \"\"\"", "typing 
import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils import logger as c_logger from forward_recieved_email.config import", "AWS lambda start \"\"\" # CHANGE LOGGER logger = logging.getLogger() logger.setLevel(settings.LOGGER_LEVEL) logger.debug(json.dumps(event, indent=4))", "# CHANGE LOGGER logger = logging.getLogger() logger.setLevel(settings.LOGGER_LEVEL) logger.debug(json.dumps(event, indent=4)) result = processing.main_handler(event) return", "forward_recieved_email.config import settings from forward_recieved_email import processing # SET LOGGER c_logger.configure_logger() logger =", "from typing import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils import logger as c_logger from forward_recieved_email.config", "logging from typing import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils import logger as c_logger from", "sys import os import json import logging from typing import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from", "# SET LOGGER c_logger.configure_logger() logger = logging.getLogger(__name__) def lambda_handler(event: Dict, context: Dict) ->", "import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils import logger as c_logger from forward_recieved_email.config import settings", "Dict, context: Dict) -> None: \"\"\" AWS lambda start \"\"\" # CHANGE LOGGER", "c_logger.configure_logger() logger = logging.getLogger(__name__) def lambda_handler(event: Dict, context: Dict) -> None: \"\"\" AWS", "from forward_recieved_email.config import settings from forward_recieved_email import processing # SET LOGGER c_logger.configure_logger() logger", "import processing # SET LOGGER c_logger.configure_logger() logger = logging.getLogger(__name__) def 
lambda_handler(event: Dict, context:", "start \"\"\" # CHANGE LOGGER logger = logging.getLogger() logger.setLevel(settings.LOGGER_LEVEL) logger.debug(json.dumps(event, indent=4)) result =", "from forward_recieved_email import processing # SET LOGGER c_logger.configure_logger() logger = logging.getLogger(__name__) def lambda_handler(event:", "import logging from typing import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils import logger as c_logger", "import os import json import logging from typing import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from forward_recieved_email.utils", "settings from forward_recieved_email import processing # SET LOGGER c_logger.configure_logger() logger = logging.getLogger(__name__) def", "import sys import os import json import logging from typing import Dict sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))", "lambda start \"\"\" # CHANGE LOGGER logger = logging.getLogger() logger.setLevel(settings.LOGGER_LEVEL) logger.debug(json.dumps(event, indent=4)) result" ]
[ "by Django 3.1.2 on 2021-01-25 08:11 from django.db import migrations, models import django.db.models.deletion", "dependencies = [ ] operations = [ migrations.CreateModel( name='TwitterUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "('is_retweet', models.BooleanField(default=True)), ('retweet_count', models.IntegerField(null=True)), ('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='db.twitteruser')), ], options={ 'db_table': 'tweets', }, ),", "('tweet_text', models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3, null=True)), ('tweet_id', models.CharField(db_index=True, max_length=20, null=True)), ('tweet_info', models.JSONField()),", "options={ 'db_table': 'twitter_users', }, ), migrations.CreateModel( name='Tweet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3, null=True)), ('tweet_id',", "name='TwitterUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('screen_name', models.CharField(max_length=15)), ], options={ 'db_table': 'twitter_users',", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('screen_name', models.CharField(max_length=15)), ], options={ 'db_table': 'twitter_users', }, ), migrations.CreateModel(", "models.BooleanField(default=True)), ('retweet_count', models.IntegerField(null=True)), ('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='db.twitteruser')), ], options={ 'db_table': 'tweets', }, ), ]", "('tweet_info', models.JSONField()), ('is_retweet', models.BooleanField(default=True)), 
('retweet_count', models.IntegerField(null=True)), ('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='db.twitteruser')), ], options={ 'db_table': 'tweets',", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('screen_name', models.CharField(max_length=15)), ], options={ 'db_table': 'twitter_users', },", "null=True)), ('tweet_info', models.JSONField()), ('is_retweet', models.BooleanField(default=True)), ('retweet_count', models.IntegerField(null=True)), ('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='db.twitteruser')), ], options={ 'db_table':", "), migrations.CreateModel( name='Tweet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()),", "migrations.CreateModel( name='TwitterUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('screen_name', models.CharField(max_length=15)), ], options={ 'db_table':", "models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3, null=True)), ('tweet_id', models.CharField(db_index=True, max_length=20, null=True)), ('tweet_info', models.JSONField()), ('is_retweet', models.BooleanField(default=True)), ('retweet_count',", "('tweet_id', models.CharField(db_index=True, max_length=20, null=True)), ('tweet_info', models.JSONField()), ('is_retweet', models.BooleanField(default=True)), ('retweet_count', models.IntegerField(null=True)), ('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='db.twitteruser')),", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [", "import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations =", "models 
import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations", "primary_key=True, serialize=False, verbose_name='ID')), ('screen_name', models.CharField(max_length=15)), ], options={ 'db_table': 'twitter_users', }, ), migrations.CreateModel( name='Tweet',", "'twitter_users', }, ), migrations.CreateModel( name='Tweet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)),", "primary_key=True, serialize=False, verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3, null=True)), ('tweet_id', models.CharField(db_index=True, max_length=20,", "models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3, null=True)), ('tweet_id', models.CharField(db_index=True, max_length=20, null=True)), ('tweet_info', models.JSONField()), ('is_retweet',", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3, null=True)), ('tweet_id', models.CharField(db_index=True,", "operations = [ migrations.CreateModel( name='TwitterUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('screen_name', models.CharField(max_length=15)),", "migrations.CreateModel( name='Tweet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()), ('tweet_lang',", "verbose_name='ID')), ('screen_name', models.CharField(max_length=15)), ], options={ 'db_table': 'twitter_users', }, ), migrations.CreateModel( name='Tweet', fields=[ 
('id',", "models.CharField(max_length=15)), ], options={ 'db_table': 'twitter_users', }, ), migrations.CreateModel( name='Tweet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "= True dependencies = [ ] operations = [ migrations.CreateModel( name='TwitterUser', fields=[ ('id',", "08:11 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True", "[ ] operations = [ migrations.CreateModel( name='TwitterUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "[ migrations.CreateModel( name='TwitterUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('screen_name', models.CharField(max_length=15)), ], options={", "('screen_name', models.CharField(max_length=15)), ], options={ 'db_table': 'twitter_users', }, ), migrations.CreateModel( name='Tweet', fields=[ ('id', models.AutoField(auto_created=True,", "'db_table': 'twitter_users', }, ), migrations.CreateModel( name='Tweet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tweet_text',", "2021-01-25 08:11 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial =", "Generated by Django 3.1.2 on 2021-01-25 08:11 from django.db import migrations, models import", "('tweet_lang', models.CharField(max_length=3, null=True)), ('tweet_id', models.CharField(db_index=True, max_length=20, null=True)), ('tweet_info', models.JSONField()), ('is_retweet', models.BooleanField(default=True)), ('retweet_count', models.IntegerField(null=True)),", "True dependencies = [ ] operations = [ migrations.CreateModel( name='TwitterUser', fields=[ ('id', models.AutoField(auto_created=True,", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = 
True dependencies =", "] operations = [ migrations.CreateModel( name='TwitterUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('screen_name',", "class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel(", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ]", "serialize=False, verbose_name='ID')), ('screen_name', models.CharField(max_length=15)), ], options={ 'db_table': 'twitter_users', }, ), migrations.CreateModel( name='Tweet', fields=[", "}, ), migrations.CreateModel( name='Tweet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)), ('tweet_date',", "name='Tweet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3,", "initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='TwitterUser', fields=[", "], options={ 'db_table': 'twitter_users', }, ), migrations.CreateModel( name='Tweet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "serialize=False, verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3, null=True)), ('tweet_id', models.CharField(db_index=True, max_length=20, null=True)),", "null=True)), ('tweet_id', models.CharField(db_index=True, max_length=20, null=True)), ('tweet_info', models.JSONField()), ('is_retweet', models.BooleanField(default=True)), ('retweet_count', models.IntegerField(null=True)), ('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "django.db.models.deletion 
class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [", "('tweet_date', models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3, null=True)), ('tweet_id', models.CharField(db_index=True, max_length=20, null=True)), ('tweet_info', models.JSONField()), ('is_retweet', models.BooleanField(default=True)),", "models.CharField(max_length=3, null=True)), ('tweet_id', models.CharField(db_index=True, max_length=20, null=True)), ('tweet_info', models.JSONField()), ('is_retweet', models.BooleanField(default=True)), ('retweet_count', models.IntegerField(null=True)), ('twitter_user',", "= [ migrations.CreateModel( name='TwitterUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('screen_name', models.CharField(max_length=15)), ],", "models.CharField(db_index=True, max_length=20, null=True)), ('tweet_info', models.JSONField()), ('is_retweet', models.BooleanField(default=True)), ('retweet_count', models.IntegerField(null=True)), ('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='db.twitteruser')), ],", "models.JSONField()), ('is_retweet', models.BooleanField(default=True)), ('retweet_count', models.IntegerField(null=True)), ('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='db.twitteruser')), ], options={ 'db_table': 'tweets', },", "# Generated by Django 3.1.2 on 2021-01-25 08:11 from django.db import migrations, models", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies", "max_length=20, null=True)), ('tweet_info', models.JSONField()), ('is_retweet', models.BooleanField(default=True)), ('retweet_count', models.IntegerField(null=True)), ('twitter_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='db.twitteruser')), ], options={", "3.1.2 on 2021-01-25 08:11 from django.db import migrations, 
models import django.db.models.deletion class Migration(migrations.Migration):", "Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='TwitterUser',", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3, null=True)),", "Django 3.1.2 on 2021-01-25 08:11 from django.db import migrations, models import django.db.models.deletion class", "= [ ] operations = [ migrations.CreateModel( name='TwitterUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "verbose_name='ID')), ('tweet_text', models.CharField(max_length=280)), ('tweet_date', models.DateTimeField()), ('tweet_lang', models.CharField(max_length=3, null=True)), ('tweet_id', models.CharField(db_index=True, max_length=20, null=True)), ('tweet_info',", "on 2021-01-25 08:11 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('screen_name', models.CharField(max_length=15)), ], options={ 'db_table': 'twitter_users', }, )," ]
[ "tag): \"\"\" $ curl -d '{\"key1\":\"value1\", \"key2\":\"value2\"}' \\ -H \"Content-Type: application/json\" -X POST", "'{\"key1\":\"value1\", \"key2\":\"value2\"}' \\ -H \"Content-Type: application/json\" -X POST \\ localhost:1700/info/echo/hi | json :param", "\"\"\" $ curl localhost:1700/info/env/PATH :param request: :param tag: :return: \"\"\" import os return", "\"\"\" import os return json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>') async def echo(request, tag): \"\"\" $", ":return: \"\"\" import os return json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>') async def echo(request, tag): \"\"\"", "-X POST \\ localhost:1700/info/echo/hi | json :param request: :param tag: :return: \"\"\" data=request.json", ":param request: :param tag: :return: \"\"\" import os return json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>') async", "env_handler(request, tag): \"\"\" $ curl localhost:1700/info/env/PATH :param request: :param tag: :return: \"\"\" import", "json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>') async def echo(request, tag): \"\"\" $ curl -d '{\"key1\":\"value1\", \"key2\":\"value2\"}'", "tag: :return: \"\"\" data=request.json print(\"..\", data) return json({tag:'POST request - {}'.format(request.json), 'keys': list(data.keys()),", "url_prefix='/info') @info.route(\"/ping\") async def ping(request): \"\"\" $ curl localhost:1700/info/ping :param request: :return: \"\"\"", "$ curl localhost:1700/info/ping :param request: :return: \"\"\" return json({ \"hello\": \"world\" }) @info.route('/env/<tag>')", "import Blueprint from sanic.response import json info = Blueprint('info', url_prefix='/info') @info.route(\"/ping\") async def", "\"\"\" $ curl localhost:1700/info/ping :param request: :return: \"\"\" return json({ \"hello\": \"world\" })", "return json({ \"hello\": \"world\" }) @info.route('/env/<tag>') async def env_handler(request, tag): \"\"\" $ curl", "curl localhost:1700/info/env/PATH :param request: :param tag: :return: 
\"\"\" import os return json({tag: os.environ.get(tag)})", "tag: :return: \"\"\" import os return json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>') async def echo(request, tag):", "$ curl -d '{\"key1\":\"value1\", \"key2\":\"value2\"}' \\ -H \"Content-Type: application/json\" -X POST \\ localhost:1700/info/echo/hi", "\"Content-Type: application/json\" -X POST \\ localhost:1700/info/echo/hi | json :param request: :param tag: :return:", "= Blueprint('info', url_prefix='/info') @info.route(\"/ping\") async def ping(request): \"\"\" $ curl localhost:1700/info/ping :param request:", "Blueprint('info', url_prefix='/info') @info.route(\"/ping\") async def ping(request): \"\"\" $ curl localhost:1700/info/ping :param request: :return:", "@info.route(\"/ping\") async def ping(request): \"\"\" $ curl localhost:1700/info/ping :param request: :return: \"\"\" return", "ping(request): \"\"\" $ curl localhost:1700/info/ping :param request: :return: \"\"\" return json({ \"hello\": \"world\"", "@info.post('/echo/<tag>') async def echo(request, tag): \"\"\" $ curl -d '{\"key1\":\"value1\", \"key2\":\"value2\"}' \\ -H", "request: :param tag: :return: \"\"\" import os return json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>') async def", "sanic.response import json info = Blueprint('info', url_prefix='/info') @info.route(\"/ping\") async def ping(request): \"\"\" $", "localhost:1700/info/env/PATH :param request: :param tag: :return: \"\"\" import os return json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>')", "async def echo(request, tag): \"\"\" $ curl -d '{\"key1\":\"value1\", \"key2\":\"value2\"}' \\ -H \"Content-Type:", "-d '{\"key1\":\"value1\", \"key2\":\"value2\"}' \\ -H \"Content-Type: application/json\" -X POST \\ localhost:1700/info/echo/hi | json", "application/json\" -X POST \\ localhost:1700/info/echo/hi | json :param request: :param tag: :return: \"\"\"", "def echo(request, tag): \"\"\" $ curl -d '{\"key1\":\"value1\", \"key2\":\"value2\"}' \\ -H 
\"Content-Type: application/json\"", ":return: \"\"\" data=request.json print(\"..\", data) return json({tag:'POST request - {}'.format(request.json), 'keys': list(data.keys()), })", "echo(request, tag): \"\"\" $ curl -d '{\"key1\":\"value1\", \"key2\":\"value2\"}' \\ -H \"Content-Type: application/json\" -X", ":return: \"\"\" return json({ \"hello\": \"world\" }) @info.route('/env/<tag>') async def env_handler(request, tag): \"\"\"", "os return json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>') async def echo(request, tag): \"\"\" $ curl -d", "POST \\ localhost:1700/info/echo/hi | json :param request: :param tag: :return: \"\"\" data=request.json print(\"..\",", ":param tag: :return: \"\"\" import os return json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>') async def echo(request,", "tag): \"\"\" $ curl localhost:1700/info/env/PATH :param request: :param tag: :return: \"\"\" import os", ":param request: :return: \"\"\" return json({ \"hello\": \"world\" }) @info.route('/env/<tag>') async def env_handler(request,", "\"\"\" return json({ \"hello\": \"world\" }) @info.route('/env/<tag>') async def env_handler(request, tag): \"\"\" $", "curl localhost:1700/info/ping :param request: :return: \"\"\" return json({ \"hello\": \"world\" }) @info.route('/env/<tag>') async", "\"world\" }) @info.route('/env/<tag>') async def env_handler(request, tag): \"\"\" $ curl localhost:1700/info/env/PATH :param request:", "async def ping(request): \"\"\" $ curl localhost:1700/info/ping :param request: :return: \"\"\" return json({", "@info.route('/env/<tag>') async def env_handler(request, tag): \"\"\" $ curl localhost:1700/info/env/PATH :param request: :param tag:", ":param request: :param tag: :return: \"\"\" data=request.json print(\"..\", data) return json({tag:'POST request -", "localhost:1700/info/echo/hi | json :param request: :param tag: :return: \"\"\" data=request.json print(\"..\", data) return", "from sanic.response import json info = Blueprint('info', 
url_prefix='/info') @info.route(\"/ping\") async def ping(request): \"\"\"", "json info = Blueprint('info', url_prefix='/info') @info.route(\"/ping\") async def ping(request): \"\"\" $ curl localhost:1700/info/ping", "json({ \"hello\": \"world\" }) @info.route('/env/<tag>') async def env_handler(request, tag): \"\"\" $ curl localhost:1700/info/env/PATH", "def env_handler(request, tag): \"\"\" $ curl localhost:1700/info/env/PATH :param request: :param tag: :return: \"\"\"", "localhost:1700/info/ping :param request: :return: \"\"\" return json({ \"hello\": \"world\" }) @info.route('/env/<tag>') async def", "curl -d '{\"key1\":\"value1\", \"key2\":\"value2\"}' \\ -H \"Content-Type: application/json\" -X POST \\ localhost:1700/info/echo/hi |", "import os return json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>') async def echo(request, tag): \"\"\" $ curl", "request: :param tag: :return: \"\"\" data=request.json print(\"..\", data) return json({tag:'POST request - {}'.format(request.json),", "return json({tag: os.environ.get(tag)}) @info.post('/echo/<tag>') async def echo(request, tag): \"\"\" $ curl -d '{\"key1\":\"value1\",", "import json info = Blueprint('info', url_prefix='/info') @info.route(\"/ping\") async def ping(request): \"\"\" $ curl", "\\ -H \"Content-Type: application/json\" -X POST \\ localhost:1700/info/echo/hi | json :param request: :param", "\"\"\" $ curl -d '{\"key1\":\"value1\", \"key2\":\"value2\"}' \\ -H \"Content-Type: application/json\" -X POST \\", "\"hello\": \"world\" }) @info.route('/env/<tag>') async def env_handler(request, tag): \"\"\" $ curl localhost:1700/info/env/PATH :param", "| json :param request: :param tag: :return: \"\"\" data=request.json print(\"..\", data) return json({tag:'POST", "$ curl localhost:1700/info/env/PATH :param request: :param tag: :return: \"\"\" import os return json({tag:", "-H \"Content-Type: application/json\" -X POST \\ localhost:1700/info/echo/hi | json :param request: :param tag:", "async def 
env_handler(request, tag): \"\"\" $ curl localhost:1700/info/env/PATH :param request: :param tag: :return:", "\"key2\":\"value2\"}' \\ -H \"Content-Type: application/json\" -X POST \\ localhost:1700/info/echo/hi | json :param request:", "info = Blueprint('info', url_prefix='/info') @info.route(\"/ping\") async def ping(request): \"\"\" $ curl localhost:1700/info/ping :param", "Blueprint from sanic.response import json info = Blueprint('info', url_prefix='/info') @info.route(\"/ping\") async def ping(request):", "json :param request: :param tag: :return: \"\"\" data=request.json print(\"..\", data) return json({tag:'POST request", "from sanic import Blueprint from sanic.response import json info = Blueprint('info', url_prefix='/info') @info.route(\"/ping\")", "def ping(request): \"\"\" $ curl localhost:1700/info/ping :param request: :return: \"\"\" return json({ \"hello\":", "}) @info.route('/env/<tag>') async def env_handler(request, tag): \"\"\" $ curl localhost:1700/info/env/PATH :param request: :param", "request: :return: \"\"\" return json({ \"hello\": \"world\" }) @info.route('/env/<tag>') async def env_handler(request, tag):", "\\ localhost:1700/info/echo/hi | json :param request: :param tag: :return: \"\"\" data=request.json print(\"..\", data)", ":param tag: :return: \"\"\" data=request.json print(\"..\", data) return json({tag:'POST request - {}'.format(request.json), 'keys':", "sanic import Blueprint from sanic.response import json info = Blueprint('info', url_prefix='/info') @info.route(\"/ping\") async", "os.environ.get(tag)}) @info.post('/echo/<tag>') async def echo(request, tag): \"\"\" $ curl -d '{\"key1\":\"value1\", \"key2\":\"value2\"}' \\" ]
[ "aimlKernel def chat(request): \"\"\" Function: 对话接口 Args: request: 请求 Returns: 返回Http报文 \"\"\" dic", "def chat(request): \"\"\" Function: 对话接口 Args: request: 请求 Returns: 返回Http报文 \"\"\" dic =", "dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid'] = request.GET.get('sessionid','test') return HttpResponse(json.dumps(dic, ensure_ascii=False)) else: dic['message'] =", "dic['botResponse']) dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid'] = request.GET.get('sessionid','test') return HttpResponse(json.dumps(dic, ensure_ascii=False)) else: dic['message']", "%H:%M:%S')) dic['sessionid'] = request.GET.get('sessionid','test') return HttpResponse(json.dumps(dic, ensure_ascii=False)) else: dic['message'] = u'方法错误' return HttpResponse(json.dumps(dic,", "对话接口 Args: request: 请求 Returns: 返回Http报文 \"\"\" dic = {} if request.method ==", "from chatbot import aimlKernel def chat(request): \"\"\" Function: 对话接口 Args: request: 请求 Returns:", "dic['sessionid'] = request.GET.get('sessionid','test') return HttpResponse(json.dumps(dic, ensure_ascii=False)) else: dic['message'] = u'方法错误' return HttpResponse(json.dumps(dic, ensure_ascii=False))", "# -*- coding: utf-8 -*- import json import datetime from DataBase import DBOPs", "Function: 对话接口 Args: request: 请求 Returns: 返回Http报文 \"\"\" dic = {} if request.method", "datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid'] = request.GET.get('sessionid','test') return HttpResponse(json.dumps(dic, ensure_ascii=False)) else: dic['message'] = u'方法错误' return", "from django.shortcuts import render # Create your views here. 
# -*- coding: utf-8", "== 'GET': dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\ replace(' ', '') DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask',", "DataBase import DBOPs from django.http import HttpResponse from chatbot import aimlKernel def chat(request):", "# Create your views here. # -*- coding: utf-8 -*- import json import", "django.shortcuts import render # Create your views here. # -*- coding: utf-8 -*-", "'GET': dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\ replace(' ', '') DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'),", "Args: request: 请求 Returns: 返回Http报文 \"\"\" dic = {} if request.method == 'GET':", "dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\ replace(' ', '') DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse'])", "render # Create your views here. 
# -*- coding: utf-8 -*- import json", "datetime from DataBase import DBOPs from django.http import HttpResponse from chatbot import aimlKernel", "'无语'), request.GET.get('sessionid','test')).\\ replace(' ', '') DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse']) dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d", "if request.method == 'GET': dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\ replace(' ', '') DBOPs.InsertDB(request.GET.get('sessionid',", "import DBOPs from django.http import HttpResponse from chatbot import aimlKernel def chat(request): \"\"\"", "返回Http报文 \"\"\" dic = {} if request.method == 'GET': dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'),", "replace(' ', '') DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse']) dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid']", "import datetime from DataBase import DBOPs from django.http import HttpResponse from chatbot import", "DBOPs from django.http import HttpResponse from chatbot import aimlKernel def chat(request): \"\"\" Function:", "'无语'), dic['botResponse']) dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid'] = request.GET.get('sessionid','test') return HttpResponse(json.dumps(dic, ensure_ascii=False)) else:", "chat(request): \"\"\" Function: 对话接口 Args: request: 请求 Returns: 返回Http报文 \"\"\" dic = {}", "coding: utf-8 -*- import json import datetime from DataBase import DBOPs from django.http", "json import datetime from DataBase import DBOPs from django.http import HttpResponse from chatbot", "\"\"\" dic = {} if request.method == 'GET': dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\", "Returns: 返回Http报文 \"\"\" dic = {} if request.method == 'GET': dic['botResponse'] = 
aimlKernel.k.respond(request.GET.get('ask',", "= datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid'] = request.GET.get('sessionid','test') return HttpResponse(json.dumps(dic, ensure_ascii=False)) else: dic['message'] = u'方法错误'", "= {} if request.method == 'GET': dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\ replace(' ',", "\"\"\" Function: 对话接口 Args: request: 请求 Returns: 返回Http报文 \"\"\" dic = {} if", "your views here. # -*- coding: utf-8 -*- import json import datetime from", "import HttpResponse from chatbot import aimlKernel def chat(request): \"\"\" Function: 对话接口 Args: request:", "', '') DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse']) dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid'] =", "dic = {} if request.method == 'GET': dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\ replace('", "views here. 
# -*- coding: utf-8 -*- import json import datetime from DataBase", "{} if request.method == 'GET': dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\ replace(' ', '')", "= aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\ replace(' ', '') DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse']) dic['time']", "django.http import HttpResponse from chatbot import aimlKernel def chat(request): \"\"\" Function: 对话接口 Args:", "from django.http import HttpResponse from chatbot import aimlKernel def chat(request): \"\"\" Function: 对话接口", "请求 Returns: 返回Http报文 \"\"\" dic = {} if request.method == 'GET': dic['botResponse'] =", "-*- import json import datetime from DataBase import DBOPs from django.http import HttpResponse", "DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse']) dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid'] = request.GET.get('sessionid','test') return", "import render # Create your views here. # -*- coding: utf-8 -*- import", "import aimlKernel def chat(request): \"\"\" Function: 对话接口 Args: request: 请求 Returns: 返回Http报文 \"\"\"", "import json import datetime from DataBase import DBOPs from django.http import HttpResponse from", "here. 
# -*- coding: utf-8 -*- import json import datetime from DataBase import", "aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\ replace(' ', '') DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse']) dic['time'] =", "request.GET.get('sessionid','test')).\\ replace(' ', '') DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse']) dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S'))", "-*- coding: utf-8 -*- import json import datetime from DataBase import DBOPs from", "chatbot import aimlKernel def chat(request): \"\"\" Function: 对话接口 Args: request: 请求 Returns: 返回Http报文", "HttpResponse from chatbot import aimlKernel def chat(request): \"\"\" Function: 对话接口 Args: request: 请求", "'') DBOPs.InsertDB(request.GET.get('sessionid', 'test'), request.GET.get('ask', '无语'), dic['botResponse']) dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid'] = request.GET.get('sessionid','test')", "'test'), request.GET.get('ask', '无语'), dic['botResponse']) dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid'] = request.GET.get('sessionid','test') return HttpResponse(json.dumps(dic,", "request.GET.get('ask', '无语'), dic['botResponse']) dic['time'] = datetime.datetime.now().strftime(('%Y-%m-%d %H:%M:%S')) dic['sessionid'] = request.GET.get('sessionid','test') return HttpResponse(json.dumps(dic, ensure_ascii=False))", "Create your views here. 
# -*- coding: utf-8 -*- import json import datetime", "request.method == 'GET': dic['botResponse'] = aimlKernel.k.respond(request.GET.get('ask', '无语'), request.GET.get('sessionid','test')).\\ replace(' ', '') DBOPs.InsertDB(request.GET.get('sessionid', 'test'),", "request: 请求 Returns: 返回Http报文 \"\"\" dic = {} if request.method == 'GET': dic['botResponse']", "utf-8 -*- import json import datetime from DataBase import DBOPs from django.http import", "from DataBase import DBOPs from django.http import HttpResponse from chatbot import aimlKernel def" ]
[ "palindrome import is_palindrome def test_is_palindrome_1(): assert is_palindrome('a') def test_is_palindrome_2(): assert is_palindrome('aa') assert not", "is_palindrome('abc') def test_is_palindrome_4(): assert is_palindrome('abba') assert not is_palindrome('abcd') def test_is_palindrome_5(): assert is_palindrome('abcba') assert", "test_is_palindrome_3(): assert is_palindrome('aba') assert not is_palindrome('abc') def test_is_palindrome_4(): assert is_palindrome('abba') assert not is_palindrome('abcd')", "test_is_palindrome_2(): assert is_palindrome('aa') assert not is_palindrome('ab') def test_is_palindrome_3(): assert is_palindrome('aba') assert not is_palindrome('abc')", "assert not is_palindrome('ab') def test_is_palindrome_3(): assert is_palindrome('aba') assert not is_palindrome('abc') def test_is_palindrome_4(): assert", "assert is_palindrome('a') def test_is_palindrome_2(): assert is_palindrome('aa') assert not is_palindrome('ab') def test_is_palindrome_3(): assert is_palindrome('aba')", "is_palindrome('aa') assert not is_palindrome('ab') def test_is_palindrome_3(): assert is_palindrome('aba') assert not is_palindrome('abc') def test_is_palindrome_4():", "test_is_palindrome_1(): assert is_palindrome('a') def test_is_palindrome_2(): assert is_palindrome('aa') assert not is_palindrome('ab') def test_is_palindrome_3(): assert", "assert not is_palindrome('abc') def test_is_palindrome_4(): assert is_palindrome('abba') assert not is_palindrome('abcd') def test_is_palindrome_5(): assert", "test_is_palindrome_4(): assert is_palindrome('abba') assert not is_palindrome('abcd') def test_is_palindrome_5(): assert is_palindrome('abcba') assert not is_palindrome('abcde')", "not is_palindrome('ab') def test_is_palindrome_3(): assert is_palindrome('aba') assert not is_palindrome('abc') def test_is_palindrome_4(): assert is_palindrome('abba')", "is_palindrome('a') def test_is_palindrome_2(): assert is_palindrome('aa') assert not is_palindrome('ab') def 
test_is_palindrome_3(): assert is_palindrome('aba') assert", "def test_is_palindrome_4(): assert is_palindrome('abba') assert not is_palindrome('abcd') def test_is_palindrome_5(): assert is_palindrome('abcba') assert not", "<gh_stars>0 from palindrome import is_palindrome def test_is_palindrome_1(): assert is_palindrome('a') def test_is_palindrome_2(): assert is_palindrome('aa')", "def test_is_palindrome_3(): assert is_palindrome('aba') assert not is_palindrome('abc') def test_is_palindrome_4(): assert is_palindrome('abba') assert not", "from palindrome import is_palindrome def test_is_palindrome_1(): assert is_palindrome('a') def test_is_palindrome_2(): assert is_palindrome('aa') assert", "assert is_palindrome('aba') assert not is_palindrome('abc') def test_is_palindrome_4(): assert is_palindrome('abba') assert not is_palindrome('abcd') def", "is_palindrome('ab') def test_is_palindrome_3(): assert is_palindrome('aba') assert not is_palindrome('abc') def test_is_palindrome_4(): assert is_palindrome('abba') assert", "import is_palindrome def test_is_palindrome_1(): assert is_palindrome('a') def test_is_palindrome_2(): assert is_palindrome('aa') assert not is_palindrome('ab')", "assert is_palindrome('aa') assert not is_palindrome('ab') def test_is_palindrome_3(): assert is_palindrome('aba') assert not is_palindrome('abc') def", "is_palindrome('aba') assert not is_palindrome('abc') def test_is_palindrome_4(): assert is_palindrome('abba') assert not is_palindrome('abcd') def test_is_palindrome_5():", "def test_is_palindrome_1(): assert is_palindrome('a') def test_is_palindrome_2(): assert is_palindrome('aa') assert not is_palindrome('ab') def test_is_palindrome_3():", "not is_palindrome('abc') def test_is_palindrome_4(): assert is_palindrome('abba') assert not is_palindrome('abcd') def test_is_palindrome_5(): assert is_palindrome('abcba')", "is_palindrome def test_is_palindrome_1(): assert is_palindrome('a') def test_is_palindrome_2(): assert is_palindrome('aa') assert 
not is_palindrome('ab') def", "def test_is_palindrome_2(): assert is_palindrome('aa') assert not is_palindrome('ab') def test_is_palindrome_3(): assert is_palindrome('aba') assert not" ]
[ "count += len(i.attrib) return count if __name__ == '__main__': sys.stdin.readline() xml = sys.stdin.read()", "for i in elem: depth(i, level) if __name__ == '__main__': n = int(input())", "import sys import xml.etree.ElementTree as etree def get_attr_number(node): # your code goes here", "+= len(i.attrib) return count if __name__ == '__main__': sys.stdin.readline() xml = sys.stdin.read() tree", "code goes here level += 1 if level >= maxdepth: maxdepth = level", "0 def depth(elem, level): global maxdepth # your code goes here level +=", "in elem: depth(i, level) if __name__ == '__main__': n = int(input()) xml =", "count = 0 for i in root.iter(): count += len(i.attrib) return count if", "xml.etree.ElementTree as etree def get_attr_number(node): # your code goes here count = 0", ">= maxdepth: maxdepth = level for i in elem: depth(i, level) if __name__", "n = int(input()) xml = \"\" for i in range(n): xml = xml", "sys.stdin.read() tree = etree.ElementTree(etree.fromstring(xml)) root = tree.getroot() print(get_attr_number(root)) ############### XML2 - Find the", "print(get_attr_number(root)) ############### XML2 - Find the Maximum Depth ################ import xml.etree.ElementTree as etree", "Score ################ import sys import xml.etree.ElementTree as etree def get_attr_number(node): # your code", "i in range(n): xml = xml + input() + \"\\n\" tree = etree.ElementTree(etree.fromstring(xml))", "Depth ################ import xml.etree.ElementTree as etree maxdepth = 0 def depth(elem, level): global", "return count if __name__ == '__main__': sys.stdin.readline() xml = sys.stdin.read() tree = etree.ElementTree(etree.fromstring(xml))", "# your code goes here count = 0 for i in root.iter(): count", "the Maximum Depth ################ import xml.etree.ElementTree as etree maxdepth = 0 def depth(elem,", "maxdepth # your code goes here level += 1 if level >= maxdepth:", "code goes here count = 0 for i in root.iter(): count += len(i.attrib)", "in root.iter(): count += 
len(i.attrib) return count if __name__ == '__main__': sys.stdin.readline() xml", "for i in range(n): xml = xml + input() + \"\\n\" tree =", "as etree def get_attr_number(node): # your code goes here count = 0 for", "here level += 1 if level >= maxdepth: maxdepth = level for i", "depth(elem, level): global maxdepth # your code goes here level += 1 if", "################ import sys import xml.etree.ElementTree as etree def get_attr_number(node): # your code goes", "sys.stdin.readline() xml = sys.stdin.read() tree = etree.ElementTree(etree.fromstring(xml)) root = tree.getroot() print(get_attr_number(root)) ############### XML2", "'__main__': sys.stdin.readline() xml = sys.stdin.read() tree = etree.ElementTree(etree.fromstring(xml)) root = tree.getroot() print(get_attr_number(root)) ###############", "__name__ == '__main__': n = int(input()) xml = \"\" for i in range(n):", "0 for i in root.iter(): count += len(i.attrib) return count if __name__ ==", "xml.etree.ElementTree as etree maxdepth = 0 def depth(elem, level): global maxdepth # your", "your code goes here level += 1 if level >= maxdepth: maxdepth =", "= etree.ElementTree(etree.fromstring(xml)) root = tree.getroot() print(get_attr_number(root)) ############### XML2 - Find the Maximum Depth", "if level >= maxdepth: maxdepth = level for i in elem: depth(i, level)", "sys import xml.etree.ElementTree as etree def get_attr_number(node): # your code goes here count", "- Find the Maximum Depth ################ import xml.etree.ElementTree as etree maxdepth = 0", "== '__main__': sys.stdin.readline() xml = sys.stdin.read() tree = etree.ElementTree(etree.fromstring(xml)) root = tree.getroot() print(get_attr_number(root))", "if __name__ == '__main__': sys.stdin.readline() xml = sys.stdin.read() tree = etree.ElementTree(etree.fromstring(xml)) root =", "+= 1 if level >= maxdepth: maxdepth = level for i in elem:", "= 0 for i in root.iter(): count += len(i.attrib) return count if __name__", "goes here level += 1 if level >= 
maxdepth: maxdepth = level for", "################ import xml.etree.ElementTree as etree maxdepth = 0 def depth(elem, level): global maxdepth", "XML 1 - Find the Score ################ import sys import xml.etree.ElementTree as etree", "etree maxdepth = 0 def depth(elem, level): global maxdepth # your code goes", "depth(i, level) if __name__ == '__main__': n = int(input()) xml = \"\" for", "'__main__': n = int(input()) xml = \"\" for i in range(n): xml =", "\"\" for i in range(n): xml = xml + input() + \"\\n\" tree", "as etree maxdepth = 0 def depth(elem, level): global maxdepth # your code", "i in elem: depth(i, level) if __name__ == '__main__': n = int(input()) xml", "1 if level >= maxdepth: maxdepth = level for i in elem: depth(i,", "int(input()) xml = \"\" for i in range(n): xml = xml + input()", "== '__main__': n = int(input()) xml = \"\" for i in range(n): xml", "= \"\" for i in range(n): xml = xml + input() + \"\\n\"", "XML2 - Find the Maximum Depth ################ import xml.etree.ElementTree as etree maxdepth =", "for i in root.iter(): count += len(i.attrib) return count if __name__ == '__main__':", "len(i.attrib) return count if __name__ == '__main__': sys.stdin.readline() xml = sys.stdin.read() tree =", "root = tree.getroot() print(get_attr_number(root)) ############### XML2 - Find the Maximum Depth ################ import", "get_attr_number(node): # your code goes here count = 0 for i in root.iter():", "Find the Maximum Depth ################ import xml.etree.ElementTree as etree maxdepth = 0 def", "def get_attr_number(node): # your code goes here count = 0 for i in", "level) if __name__ == '__main__': n = int(input()) xml = \"\" for i", "############### XML 1 - Find the Score ################ import sys import xml.etree.ElementTree as", "range(n): xml = xml + input() + \"\\n\" tree = etree.ElementTree(etree.fromstring(xml)) depth(tree.getroot(), -1)", "############### XML2 - Find the Maximum Depth ################ import xml.etree.ElementTree as etree 
maxdepth", "level += 1 if level >= maxdepth: maxdepth = level for i in", "tree.getroot() print(get_attr_number(root)) ############### XML2 - Find the Maximum Depth ################ import xml.etree.ElementTree as", "elem: depth(i, level) if __name__ == '__main__': n = int(input()) xml = \"\"", "import xml.etree.ElementTree as etree def get_attr_number(node): # your code goes here count =", "= sys.stdin.read() tree = etree.ElementTree(etree.fromstring(xml)) root = tree.getroot() print(get_attr_number(root)) ############### XML2 - Find", "def depth(elem, level): global maxdepth # your code goes here level += 1", "goes here count = 0 for i in root.iter(): count += len(i.attrib) return", "= int(input()) xml = \"\" for i in range(n): xml = xml +", "Maximum Depth ################ import xml.etree.ElementTree as etree maxdepth = 0 def depth(elem, level):", "maxdepth: maxdepth = level for i in elem: depth(i, level) if __name__ ==", "count if __name__ == '__main__': sys.stdin.readline() xml = sys.stdin.read() tree = etree.ElementTree(etree.fromstring(xml)) root", "xml = xml + input() + \"\\n\" tree = etree.ElementTree(etree.fromstring(xml)) depth(tree.getroot(), -1) print(maxdepth)", "xml = sys.stdin.read() tree = etree.ElementTree(etree.fromstring(xml)) root = tree.getroot() print(get_attr_number(root)) ############### XML2 -", "1 - Find the Score ################ import sys import xml.etree.ElementTree as etree def", "maxdepth = level for i in elem: depth(i, level) if __name__ == '__main__':", "in range(n): xml = xml + input() + \"\\n\" tree = etree.ElementTree(etree.fromstring(xml)) depth(tree.getroot(),", "xml = \"\" for i in range(n): xml = xml + input() +", "your code goes here count = 0 for i in root.iter(): count +=", "level): global maxdepth # your code goes here level += 1 if level", "etree def get_attr_number(node): # your code goes here count = 0 for i", "import xml.etree.ElementTree as etree maxdepth = 0 def depth(elem, level): global maxdepth #", "- Find the 
Score ################ import sys import xml.etree.ElementTree as etree def get_attr_number(node):", "root.iter(): count += len(i.attrib) return count if __name__ == '__main__': sys.stdin.readline() xml =", "maxdepth = 0 def depth(elem, level): global maxdepth # your code goes here", "# your code goes here level += 1 if level >= maxdepth: maxdepth", "tree = etree.ElementTree(etree.fromstring(xml)) root = tree.getroot() print(get_attr_number(root)) ############### XML2 - Find the Maximum", "level for i in elem: depth(i, level) if __name__ == '__main__': n =", "the Score ################ import sys import xml.etree.ElementTree as etree def get_attr_number(node): # your", "etree.ElementTree(etree.fromstring(xml)) root = tree.getroot() print(get_attr_number(root)) ############### XML2 - Find the Maximum Depth ################", "= tree.getroot() print(get_attr_number(root)) ############### XML2 - Find the Maximum Depth ################ import xml.etree.ElementTree", "i in root.iter(): count += len(i.attrib) return count if __name__ == '__main__': sys.stdin.readline()", "level >= maxdepth: maxdepth = level for i in elem: depth(i, level) if", "= 0 def depth(elem, level): global maxdepth # your code goes here level", "if __name__ == '__main__': n = int(input()) xml = \"\" for i in", "__name__ == '__main__': sys.stdin.readline() xml = sys.stdin.read() tree = etree.ElementTree(etree.fromstring(xml)) root = tree.getroot()", "global maxdepth # your code goes here level += 1 if level >=", "Find the Score ################ import sys import xml.etree.ElementTree as etree def get_attr_number(node): #", "here count = 0 for i in root.iter(): count += len(i.attrib) return count", "= level for i in elem: depth(i, level) if __name__ == '__main__': n" ]
[ "sys from os import listdir from os.path import isfile, join import psutil import", "import sys from os import listdir from os.path import isfile, join import psutil", "images subprocess.call('rsync -av '+ datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+' '+ '--exclude VOC2007/JPEGImages', shell=True) in_img_dir =", "= 'temp_'+str(os.getpid()) subprocess.call('mkdir -p '+temp_dir,shell=True) # Convert to png # im = Image.open(in_img_dir+file_name)", "VOC2007/JPEGImages', shell=True) in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make the directory for", "+ '/', shell=True) # Convert back to jpeg and save im = Image.open(temp_dir+'/'+'output.png')", "file_list] for i, f in enumerate(futures.as_completed(fs)): # Write progress to error so that", "'/', shell=True) # Convert back to jpeg and save im = Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name)", "section subprocess.call('mkdir -p '+out_img_dir,shell=True) # Get list of files in directory file_list =", "subprocess.call('mkdir -p '+temp_dir,shell=True) # Convert to png # im = Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') #", "'+temp_dir,shell=True) # Convert to png # im = Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') # Run the", "= [ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64] in_vers = [ 0, 0,", "this section subprocess.call('mkdir -p '+out_img_dir,shell=True) # Get list of files in directory file_list", "../common/ version='+str(version),shell=True) # Copy all but the JPEG images subprocess.call('rsync -av '+ datasetpath+'/v'+str(in_version)+'/", "in enumerate(futures.as_completed(fs)): # Write progress to error so that it can be seen", "# Delete temp directory subprocess.call('rm -rf '+temp_dir,shell=True) for i, version in enumerate(vers_to_run): in_version", "in_version = in_vers[i] subprocess.call('make 
--directory ../common/ version='+str(version),shell=True) # Copy all but the JPEG", "it can be seen sys.stderr.write( \\ \"Converted Image: {} / {} \\r\".format(i, len(file_list)))", "__future__ import unicode_literals from PIL import Image from subprocess import check_call from concurrent", "'/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir): # Make temp directory temp_dir = 'temp_'+str(os.getpid()) subprocess.call('mkdir -p '+temp_dir,shell=True)", "= [f for f in listdir(in_img_dir) if isfile(join(in_img_dir, f))] file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads) as", "that it can be seen sys.stderr.write( \\ \"Converted Image: {} / {} \\r\".format(i,", "executor: fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list] for i, f in enumerate(futures.as_completed(fs)):", "enumerate(futures.as_completed(fs)): # Write progress to error so that it can be seen sys.stderr.write(", "save im = Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) # Delete temp directory subprocess.call('rm -rf '+temp_dir,shell=True) for", "6, 6] num_threads = 14 # The directory to convert datasetpath = '/datasets/voc-2007/'", "subprocess.call('../common/pipeline_V'+str(version) + '.o ' + temp_dir + '/' + file_name + '_temp.png '", "from PIL import Image from subprocess import check_call from concurrent import futures import", "'_temp.png ' + temp_dir + '/', shell=True) # Convert back to jpeg and", "subprocess import sys from os import listdir from os.path import isfile, join import", "temp_dir = 'temp_'+str(os.getpid()) subprocess.call('mkdir -p '+temp_dir,shell=True) # Convert to png # im =", "# Copy all but the JPEG images subprocess.call('rsync -av '+ datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+'", "= [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list] for i, f in enumerate(futures.as_completed(fs)): # Write", "futures 
import subprocess import os import io import subprocess import sys from os", "import subprocess import os import io import subprocess import sys from os import", "for i, version in enumerate(vers_to_run): in_version = in_vers[i] subprocess.call('make --directory ../common/ version='+str(version),shell=True) #", "but the JPEG images subprocess.call('rsync -av '+ datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+' '+ '--exclude VOC2007/JPEGImages',", "0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6,", "import time vers_to_run = [ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64] in_vers =", "The directory to convert datasetpath = '/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir): # Make temp directory", "in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make the directory for this section", "# Make the directory for this section subprocess.call('mkdir -p '+out_img_dir,shell=True) # Get list", "list of files in directory file_list = [f for f in listdir(in_img_dir) if", "im = Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) # Delete temp directory subprocess.call('rm -rf '+temp_dir,shell=True) for i,", "-rf '+temp_dir,shell=True) for i, version in enumerate(vers_to_run): in_version = in_vers[i] subprocess.call('make --directory ../common/", "Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) # Delete temp directory subprocess.call('rm -rf '+temp_dir,shell=True) for i, version in", "isfile, join import psutil import time vers_to_run = [ 3, 4, 5, 7,", "subprocess.call('rm -rf '+temp_dir,shell=True) for i, version in enumerate(vers_to_run): in_version = in_vers[i] subprocess.call('make --directory", "the given pipeline on the png subprocess.call('../common/pipeline_V'+str(version) + '.o ' + temp_dir +", "import listdir from os.path import isfile, join import psutil import time vers_to_run =", 
"datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+' '+ '--exclude VOC2007/JPEGImages', shell=True) in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/'", "from __future__ import unicode_literals from PIL import Image from subprocess import check_call from", "import psutil import time vers_to_run = [ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64]", "to convert datasetpath = '/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir): # Make temp directory temp_dir =", "# Make temp directory temp_dir = 'temp_'+str(os.getpid()) subprocess.call('mkdir -p '+temp_dir,shell=True) # Convert to", "[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 6,", "convert_img(file_name,in_img_dir,out_img_dir): # Make temp directory temp_dir = 'temp_'+str(os.getpid()) subprocess.call('mkdir -p '+temp_dir,shell=True) # Convert", "6] num_threads = 14 # The directory to convert datasetpath = '/datasets/voc-2007/' def", "Convert back to jpeg and save im = Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) # Delete temp", "0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6, 6] num_threads", "+ '/' + file_name + '_temp.png ' + temp_dir + '/', shell=True) #", "import os import io import subprocess import sys from os import listdir from", "im.save(out_img_dir+'/'+file_name) # Delete temp directory subprocess.call('rm -rf '+temp_dir,shell=True) for i, version in enumerate(vers_to_run):", "version in enumerate(vers_to_run): in_version = in_vers[i] subprocess.call('make --directory ../common/ version='+str(version),shell=True) # Copy all", "file_list = [f for f in listdir(in_img_dir) if isfile(join(in_img_dir, f))] file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads)", "temp directory subprocess.call('rm -rf '+temp_dir,shell=True) for i, version in enumerate(vers_to_run): in_version = in_vers[i]", "Image from subprocess import check_call from concurrent import futures import 
subprocess import os", "enumerate(vers_to_run): in_version = in_vers[i] subprocess.call('make --directory ../common/ version='+str(version),shell=True) # Copy all but the", "subprocess import os import io import subprocess import sys from os import listdir", "in enumerate(vers_to_run): in_version = in_vers[i] subprocess.call('make --directory ../common/ version='+str(version),shell=True) # Copy all but", "directory to convert datasetpath = '/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir): # Make temp directory temp_dir", "for file_name in file_list] for i, f in enumerate(futures.as_completed(fs)): # Write progress to", "'+ '--exclude VOC2007/JPEGImages', shell=True) in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make the", "convert datasetpath = '/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir): # Make temp directory temp_dir = 'temp_'+str(os.getpid())", "f))] file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads) as executor: fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list]", "temp_dir + '/', shell=True) # Convert back to jpeg and save im =", "0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6, 6]", "pipeline on the png subprocess.call('../common/pipeline_V'+str(version) + '.o ' + temp_dir + '/' +", "directory temp_dir = 'temp_'+str(os.getpid()) subprocess.call('mkdir -p '+temp_dir,shell=True) # Convert to png # im", "-av '+ datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+' '+ '--exclude VOC2007/JPEGImages', shell=True) in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir", "JPEG images subprocess.call('rsync -av '+ datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+' '+ '--exclude VOC2007/JPEGImages', shell=True) in_img_dir", "+ file_name + '_temp.png ' + temp_dir + '/', shell=True) # Convert back", "import io import 
subprocess import sys from os import listdir from os.path import", "in_vers = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,", "from concurrent import futures import subprocess import os import io import subprocess import", "PIL import Image from subprocess import check_call from concurrent import futures import subprocess", "6, 6, 6, 6, 6, 6] num_threads = 14 # The directory to", "listdir(in_img_dir) if isfile(join(in_img_dir, f))] file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads) as executor: fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for", "Write progress to error so that it can be seen sys.stderr.write( \\ \"Converted", "6, 6, 6] num_threads = 14 # The directory to convert datasetpath =", "= in_vers[i] subprocess.call('make --directory ../common/ version='+str(version),shell=True) # Copy all but the JPEG images", "unicode_literals from PIL import Image from subprocess import check_call from concurrent import futures", "version='+str(version),shell=True) # Copy all but the JPEG images subprocess.call('rsync -av '+ datasetpath+'/v'+str(in_version)+'/ '+", "png # im = Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') # Run the given pipeline on the", "the JPEG images subprocess.call('rsync -av '+ datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+' '+ '--exclude VOC2007/JPEGImages', shell=True)", "= '/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir): # Make temp directory temp_dir = 'temp_'+str(os.getpid()) subprocess.call('mkdir -p", "# Write progress to error so that it can be seen sys.stderr.write( \\", "temp_dir + '/' + file_name + '_temp.png ' + temp_dir + '/', shell=True)", "check_call from concurrent import futures import subprocess import os import io import subprocess", "to jpeg and save im = Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) # Delete temp directory subprocess.call('rm", "progress to error so that it can be seen 
sys.stderr.write( \\ \"Converted Image:", "'+out_img_dir,shell=True) # Get list of files in directory file_list = [f for f", "all but the JPEG images subprocess.call('rsync -av '+ datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+' '+ '--exclude", "file_name + '_temp.png ' + temp_dir + '/', shell=True) # Convert back to", "time vers_to_run = [ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64] in_vers = [", "directory file_list = [f for f in listdir(in_img_dir) if isfile(join(in_img_dir, f))] file_list.sort() with", "isfile(join(in_img_dir, f))] file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads) as executor: fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in", "14 # The directory to convert datasetpath = '/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir): # Make", "' + temp_dir + '/', shell=True) # Convert back to jpeg and save", "io import subprocess import sys from os import listdir from os.path import isfile,", "= Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) # Delete temp directory subprocess.call('rm -rf '+temp_dir,shell=True) for i, version", "'--exclude VOC2007/JPEGImages', shell=True) in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make the directory", "0, 0, 0, 6, 6, 6, 6, 6, 6, 6] num_threads = 14", "#! 
/usr/bin/env python from __future__ import unicode_literals from PIL import Image from subprocess", "the directory for this section subprocess.call('mkdir -p '+out_img_dir,shell=True) # Get list of files", "in file_list] for i, f in enumerate(futures.as_completed(fs)): # Write progress to error so", "# im = Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') # Run the given pipeline on the png", "datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make the directory for this section subprocess.call('mkdir -p", "psutil import time vers_to_run = [ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64] in_vers", "-p '+out_img_dir,shell=True) # Get list of files in directory file_list = [f for", "fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list] for i, f in enumerate(futures.as_completed(fs)): #", "0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 6, 6,", "6, 6, 6, 6] num_threads = 14 # The directory to convert datasetpath", "Convert to png # im = Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') # Run the given pipeline", "as executor: fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list] for i, f in", "file_name in file_list] for i, f in enumerate(futures.as_completed(fs)): # Write progress to error", "python from __future__ import unicode_literals from PIL import Image from subprocess import check_call", "4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64] in_vers = [ 0, 0, 0, 0, 0,", "8, 9,10,11,12,58,59,60,61,62,63,64] in_vers = [ 0, 0, 0, 0, 0, 0, 0, 0,", "'+temp_dir,shell=True) for i, version in enumerate(vers_to_run): in_version = in_vers[i] subprocess.call('make --directory ../common/ version='+str(version),shell=True)", "datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make the directory for this section subprocess.call('mkdir -p '+out_img_dir,shell=True) # Get", "subprocess 
import check_call from concurrent import futures import subprocess import os import io", "5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64] in_vers = [ 0, 0, 0, 0, 0, 0,", "import unicode_literals from PIL import Image from subprocess import check_call from concurrent import", "import check_call from concurrent import futures import subprocess import os import io import", "0, 0, 0, 0, 6, 6, 6, 6, 6, 6, 6] num_threads =", "'temp_'+str(os.getpid()) subprocess.call('mkdir -p '+temp_dir,shell=True) # Convert to png # im = Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png')", "the png subprocess.call('../common/pipeline_V'+str(version) + '.o ' + temp_dir + '/' + file_name +", "9,10,11,12,58,59,60,61,62,63,64] in_vers = [ 0, 0, 0, 0, 0, 0, 0, 0, 0,", "join import psutil import time vers_to_run = [ 3, 4, 5, 7, 8,", "0, 6, 6, 6, 6, 6, 6, 6] num_threads = 14 # The", "in_vers[i] subprocess.call('make --directory ../common/ version='+str(version),shell=True) # Copy all but the JPEG images subprocess.call('rsync", "from os.path import isfile, join import psutil import time vers_to_run = [ 3,", "vers_to_run = [ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64] in_vers = [ 0,", "Copy all but the JPEG images subprocess.call('rsync -av '+ datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+' '+", "in listdir(in_img_dir) if isfile(join(in_img_dir, f))] file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads) as executor: fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir)", "datasetpath+'/v'+str(version)+' '+ '--exclude VOC2007/JPEGImages', shell=True) in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make", "f in enumerate(futures.as_completed(fs)): # Write progress to error so that it can be", "num_threads = 14 # The directory to convert datasetpath = '/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir):", "of 
files in directory file_list = [f for f in listdir(in_img_dir) if isfile(join(in_img_dir,", "/usr/bin/env python from __future__ import unicode_literals from PIL import Image from subprocess import", "= 14 # The directory to convert datasetpath = '/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir): #", "datasetpath = '/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir): # Make temp directory temp_dir = 'temp_'+str(os.getpid()) subprocess.call('mkdir", "subprocess.call('rsync -av '+ datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+' '+ '--exclude VOC2007/JPEGImages', shell=True) in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/'", "Get list of files in directory file_list = [f for f in listdir(in_img_dir)", "if isfile(join(in_img_dir, f))] file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads) as executor: fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name", "jpeg and save im = Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) # Delete temp directory subprocess.call('rm -rf", "0, 0, 0, 0, 0, 0, 0, 6, 6, 6, 6, 6, 6,", "[ 3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64] in_vers = [ 0, 0, 0,", "'+ datasetpath+'/v'+str(in_version)+'/ '+ datasetpath+'/v'+str(version)+' '+ '--exclude VOC2007/JPEGImages', shell=True) in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir =", "png subprocess.call('../common/pipeline_V'+str(version) + '.o ' + temp_dir + '/' + file_name + '_temp.png", "import isfile, join import psutil import time vers_to_run = [ 3, 4, 5,", "import subprocess import sys from os import listdir from os.path import isfile, join", "import Image from subprocess import check_call from concurrent import futures import subprocess import", "shell=True) # Convert back to jpeg and save im = Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) #", "for f in listdir(in_img_dir) if 
isfile(join(in_img_dir, f))] file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads) as executor: fs", "'+ datasetpath+'/v'+str(version)+' '+ '--exclude VOC2007/JPEGImages', shell=True) in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' #", "given pipeline on the png subprocess.call('../common/pipeline_V'+str(version) + '.o ' + temp_dir + '/'", "listdir from os.path import isfile, join import psutil import time vers_to_run = [", "# Convert back to jpeg and save im = Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) # Delete", "im = Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') # Run the given pipeline on the png subprocess.call('../common/pipeline_V'+str(version)", "so that it can be seen sys.stderr.write( \\ \"Converted Image: {} / {}", "+ '.o ' + temp_dir + '/' + file_name + '_temp.png ' +", "+ temp_dir + '/', shell=True) # Convert back to jpeg and save im", "# Convert to png # im = Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') # Run the given", "i, f in enumerate(futures.as_completed(fs)): # Write progress to error so that it can", "to png # im = Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') # Run the given pipeline on", "and save im = Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) # Delete temp directory subprocess.call('rm -rf '+temp_dir,shell=True)", "[f for f in listdir(in_img_dir) if isfile(join(in_img_dir, f))] file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:", "for this section subprocess.call('mkdir -p '+out_img_dir,shell=True) # Get list of files in directory", "futures.ProcessPoolExecutor(max_workers=num_threads) as executor: fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list] for i, f", "temp directory temp_dir = 
'temp_'+str(os.getpid()) subprocess.call('mkdir -p '+temp_dir,shell=True) # Convert to png #", "im.save(temp_dir+'/'+file_name+'_temp.png') # Run the given pipeline on the png subprocess.call('../common/pipeline_V'+str(version) + '.o '", "Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') # Run the given pipeline on the png subprocess.call('../common/pipeline_V'+str(version) + '.o", "concurrent import futures import subprocess import os import io import subprocess import sys", "6, 6, 6, 6, 6] num_threads = 14 # The directory to convert", "'.o ' + temp_dir + '/' + file_name + '_temp.png ' + temp_dir", "error so that it can be seen sys.stderr.write( \\ \"Converted Image: {} /", "os import listdir from os.path import isfile, join import psutil import time vers_to_run", "Run the given pipeline on the png subprocess.call('../common/pipeline_V'+str(version) + '.o ' + temp_dir", "on the png subprocess.call('../common/pipeline_V'+str(version) + '.o ' + temp_dir + '/' + file_name", "subprocess.call('make --directory ../common/ version='+str(version),shell=True) # Copy all but the JPEG images subprocess.call('rsync -av", "--directory ../common/ version='+str(version),shell=True) # Copy all but the JPEG images subprocess.call('rsync -av '+", "0, 0, 6, 6, 6, 6, 6, 6, 6] num_threads = 14 #", "subprocess.call('mkdir -p '+out_img_dir,shell=True) # Get list of files in directory file_list = [f", "i, version in enumerate(vers_to_run): in_version = in_vers[i] subprocess.call('make --directory ../common/ version='+str(version),shell=True) # Copy", "' + temp_dir + '/' + file_name + '_temp.png ' + temp_dir +", "Make temp directory temp_dir = 'temp_'+str(os.getpid()) subprocess.call('mkdir -p '+temp_dir,shell=True) # Convert to png", "+ '_temp.png ' + temp_dir + '/', shell=True) # Convert back to jpeg", "files in directory file_list = [f for f in listdir(in_img_dir) if isfile(join(in_img_dir, f))]", "back to jpeg and save im = 
Image.open(temp_dir+'/'+'output.png') im.save(out_img_dir+'/'+file_name) # Delete temp directory", "f in listdir(in_img_dir) if isfile(join(in_img_dir, f))] file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads) as executor: fs =", "[executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list] for i, f in enumerate(futures.as_completed(fs)): # Write progress", "for i, f in enumerate(futures.as_completed(fs)): # Write progress to error so that it", "= [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6,", "import futures import subprocess import os import io import subprocess import sys from", "-p '+temp_dir,shell=True) # Convert to png # im = Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') # Run", "from os import listdir from os.path import isfile, join import psutil import time", "os.path import isfile, join import psutil import time vers_to_run = [ 3, 4,", "# The directory to convert datasetpath = '/datasets/voc-2007/' def convert_img(file_name,in_img_dir,out_img_dir): # Make temp", "= datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make the directory for this section subprocess.call('mkdir -p '+out_img_dir,shell=True) #", "directory for this section subprocess.call('mkdir -p '+out_img_dir,shell=True) # Get list of files in", "# Run the given pipeline on the png subprocess.call('../common/pipeline_V'+str(version) + '.o ' +", "file_list.sort() with futures.ProcessPoolExecutor(max_workers=num_threads) as executor: fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list] for", "shell=True) in_img_dir = datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make the directory for this", "in directory file_list = [f for f in listdir(in_img_dir) if isfile(join(in_img_dir, f))] file_list.sort()", "# Get list of files in directory file_list = [f for f in", "os import io import subprocess import sys 
from os import listdir from os.path", "Make the directory for this section subprocess.call('mkdir -p '+out_img_dir,shell=True) # Get list of", "out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make the directory for this section subprocess.call('mkdir -p '+out_img_dir,shell=True)", "with futures.ProcessPoolExecutor(max_workers=num_threads) as executor: fs = [executor.submit(convert_img,file_name,in_img_dir,out_img_dir) for file_name in file_list] for i,", "to error so that it can be seen sys.stderr.write( \\ \"Converted Image: {}", "+ temp_dir + '/' + file_name + '_temp.png ' + temp_dir + '/',", "def convert_img(file_name,in_img_dir,out_img_dir): # Make temp directory temp_dir = 'temp_'+str(os.getpid()) subprocess.call('mkdir -p '+temp_dir,shell=True) #", "'/' + file_name + '_temp.png ' + temp_dir + '/', shell=True) # Convert", "= Image.open(in_img_dir+file_name) im.save(temp_dir+'/'+file_name+'_temp.png') # Run the given pipeline on the png subprocess.call('../common/pipeline_V'+str(version) +", "= datasetpath+'v'+str(in_version)+'/VOC2007/JPEGImages/' out_img_dir = datasetpath+'v'+str(version)+'/VOC2007/JPEGImages/' # Make the directory for this section subprocess.call('mkdir", "directory subprocess.call('rm -rf '+temp_dir,shell=True) for i, version in enumerate(vers_to_run): in_version = in_vers[i] subprocess.call('make", "Delete temp directory subprocess.call('rm -rf '+temp_dir,shell=True) for i, version in enumerate(vers_to_run): in_version =", "3, 4, 5, 7, 8, 9,10,11,12,58,59,60,61,62,63,64] in_vers = [ 0, 0, 0, 0,", "from subprocess import check_call from concurrent import futures import subprocess import os import", "7, 8, 9,10,11,12,58,59,60,61,62,63,64] in_vers = [ 0, 0, 0, 0, 0, 0, 0,", "6, 6, 6, 6, 6, 6, 6] num_threads = 14 # The directory" ]
[ "Constants # --------- CONSTANT = (Literal(\"#\").suppress() + Literal(\"define\").suppress() + IDENTIFIER(\"name\") + restOfLine(\"value\")) class", "Struct(name = token.type, content = token.content[0]) structs.append(S) for variable in token.variables: size =", "start, end) in DECLARATION.scanString(code): for variable in token.variables: size = '' if not", "Functions prototype and definitions for (token, start, end) in FUNCTION.scanString(code): parameters = []", "return\" \"mat2 mat3 mat4\" \"vec2 vec3 vec4 ivec2 ivec3 ivec4 bvec2 bvec3 bvec4", "abstract syntax list \"\"\" constants = [] structs = [] variables = []", "s = \"struct %s %s;\" % (self.name, self.content) return s def parse(code): \"\"\"", "= T, name = token.name, parameters = parameters) prototypes.append(P) for parameter in parameters:", "self.code = code.strip() def __str__(self): s = str(self.type) + \" %s (\" %", "if self.storage: s += \"%s \" % self.storage if self.precision: s += \"%s", "self.name if self.type.size: s += \"[%s]\" % self.size return s class Variable(object): def", "noinline volatile public static extern external\" \"interface flat long short double half fixed", "Type): other = base self.base = other.base self.size = other.size self.storage = other.storage", "= parameters def __str__(self): s = str(self.type) + \" %s (\" % self.alias", "Prototype(object): def __init__(self, type, name, parameters): self.type = Type(type) self.name = name.strip() self.alias", "= parameter.precision, size = parameter.size), name = parameter.name, inout = parameter.inout) parameters.append(P) T", "+ Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN + ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON)) FUNCTION.ignore(cStyleComment) # Struct definitions", "RPAREN + ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON)) FUNCTION.ignore(cStyleComment) # Struct definitions & declarations #", "P = Prototype(type = T, name = 
token.name, parameters = parameters) prototypes.append(P) for", "s += \"%s\" % self.base return s def __eq__(self, other): return (self.base ==", "\" s += self.code return s class Constant(object): def __init__(self, name, value): self.name", "T, name = token.name, parameters = parameters) prototypes.append(P) for parameter in parameters: parameter.function", "self.size = size.strip() self.storage = storage.strip() self.precision = precision.strip() def __str__(self): s =", "(self.name, self.content) return s def parse(code): \"\"\" Parse a GLSL source code into", "= [] for parameter in token.parameters: size = '' if not parameter.size else", "name = parameter.name, inout = parameter.inout) parameters.append(P) T = Type(base = token.type, storage", "INTEGER | IDENTIFIER OPERATOR = oneOf(\"+ - * / [ ] . &", "RPAREN = Literal(\"(\").suppress(), Literal(\")\").suppress() LBRACK, RBRACK = Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE, RBRACE = Literal(\"{\").suppress(),", "+= \"%s\" % self.name if self.type.size: s += \"[%s]\" % self.size return s", "name.strip() self.alias = name.strip() self.parameters = parameters self.code = code.strip() def __str__(self): s", "if not variable.value else variable.value[0] V = Variable(Type(base = token.type, storage = token.storage,", "\"#define %s %s\" % (self.alias, self.value) return s def __eq__(self, other): return self.value", "= Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\")) + Optional(LBRACK +", "for parameter in token.parameters: size = '' if not parameter.size else parameter.size[0] P", "= Function( type = T, name = token.name, parameters = parameters, code =", "+ \" \" if self.name: s += \"%s\" % self.name if self.type.size: s", "nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES) + SEMICOLON) STRUCT.ignore(cStyleComment) # 
Constants # --------- CONSTANT =", "= T, name = token.name, parameters = parameters, code = token.code[0]) functions.append(F) for", "parameter # ------------------ PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\") +", "LBRACK, RBRACK = Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE, RBRACE = Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON, COMMA =", "self.precision = other.precision else: self.base = base.strip() self.size = size.strip() self.storage = storage.strip()", "[] structs = [] variables = [] prototypes= [] functions = [] #", "= Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL = Literal(\"=\").suppress() SIZE = INTEGER | IDENTIFIER OPERATOR =", "self.storage = other.storage self.precision = other.precision else: self.base = base.strip() self.size = size.strip()", "\" %s (\" % self.alias for i, parameter in enumerate(self.parameters): s += str(parameter)", "end) in FUNCTION.scanString(code): parameters = [] for parameter in token.parameters: size = ''", "size = token.size) if token.code: F = Function( type = T, name =", "DECLARATION = (STORAGE_QUALIFIER(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + VARIABLES + SEMICOLON) DECLARATION.ignore(cStyleComment) #", "delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE = (IDENTIFIER(\"name\") + Optional(LBRACK + SIZE + RBRACK)(\"size\") + Optional(EQUAL", "# Function prototypes # ------------------- FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") +", "self.inout s += str(self.type) + \" \" if self.name: s += \"%s\" %", "\"[%s]\" % self.size return s class Variable(object): def __init__(self, type, name, value=None): self.type", "= base self.base = other.base self.size = other.size self.storage = other.storage 
self.precision =", "external\" \"interface flat long short double half fixed unsigned superp\" \"input output\" \"hvec2", "return s class Function(object): def __init__(self, type, name, parameters, code): self.type = Type(type)", "s = \"\" if self.storage: s += \"%s \" % self.storage if self.precision:", "# --------------------- PART = nestedExpr() | nestedExpr('{','}') | IDENTIFIER | INTEGER | FLOAT", "FUNCTION.scanString(code): parameters = [] for parameter in token.parameters: size = '' if not", "FLOAT | OPERATOR EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE = (IDENTIFIER(\"name\") + Optional(LBRACK +", "mediump highp precision invariant\" \"discard return\" \"mat2 mat3 mat4\" \"vec2 vec3 vec4 ivec2", "Constant(name = token.name, value = token.value) constants.append(C) # Variables for (token, start, end)", "Literal(\",\").suppress() EQUAL = Literal(\"=\").suppress() SIZE = INTEGER | IDENTIFIER OPERATOR = oneOf(\"+ -", "-*- # ----------------------------------------------------------------------------- # Copyright (c) 2015, <NAME> # Distributed under the (new)", "% (self.name, self.content) return s def parse(code): \"\"\" Parse a GLSL source code", "in DECLARATION.scanString(code): for variable in token.variables: size = '' if not variable.size else", "s class Constant(object): def __init__(self, name, value): self.name = name.strip() self.alias = name.strip()", "OPERATOR EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE = (IDENTIFIER(\"name\") + Optional(LBRACK + SIZE +", "parameters.append(P) T = Type(base = token.type, storage = token.storage, precision = token.precision, size", "= Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL = Regex('(0[0-7]*)') INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)') INTEGER = INT_HEXADECIMAL | INT_OCTAL", "Copyright (c) 2015, <NAME> # Distributed under the (new) BSD License. 
See LICENSE.txt", "\"%s\" % self.base return s def __eq__(self, other): return (self.base == other.base and", "parameters, code): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.parameters =", "parameters): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.parameters = parameters", "--------------------- PART = nestedExpr() | nestedExpr('{','}') | IDENTIFIER | INTEGER | FLOAT |", "# Copyright (c) 2015, <NAME> # Distributed under the (new) BSD License. See", "STRUCT.ignore(cStyleComment) # Constants # --------- CONSTANT = (Literal(\"#\").suppress() + Literal(\"define\").suppress() + IDENTIFIER(\"name\") +", "= (Literal(\"#\").suppress() + Literal(\"define\").suppress() + IDENTIFIER(\"name\") + restOfLine(\"value\")) class Type(object): def __init__(self, base=None,", "= token.name, value = token.value) constants.append(C) # Variables for (token, start, end) in", "------------------ PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\")) +", "T = Type(base = token.type, storage = token.storage, precision = token.precision, size =", "IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL = Regex('(0[0-7]*)') INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)') INTEGER", "= token.type, storage = token.storage, precision = token.precision, size = token.size) if token.code:", "content): self.name = name.strip() self.content = content.strip() def __str__(self): s = \"struct %s", "s class Variable(object): def __init__(self, type, name, value=None): self.type = Type(type) self.name =", "and definitions for (token, start, end) in FUNCTION.scanString(code): parameters = [] for parameter", "Regex('(0[0-7]*)') INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)') INTEGER = INT_HEXADECIMAL | INT_OCTAL | INT_DECIMAL FLOAT =", "VARIABLES = 
delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION = (STORAGE_QUALIFIER(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + VARIABLES +", "+ IDENTIFIER(\"type\") + VARIABLES + SEMICOLON) DECLARATION.ignore(cStyleComment) # Function parameter # ------------------ PARAMETER", "= token.precision, size = size), name = variable.name, value = value) variables.append(V) #", "the (new) BSD License. See LICENSE.txt for more info. # ----------------------------------------------------------------------------- from pyparsing", "== other.size and self.precision == other.precision) class Parameter(object): def __init__(self, type, name=None, inout=\"in\"):", "__str__(self): s = \"#define %s %s\" % (self.alias, self.value) return s def __eq__(self,", "if isinstance(base, Type): other = base self.base = other.base self.size = other.size self.storage", "= Type(type) self.name = name.strip() self.alias = name.strip() self.value = value.strip() def __str__(self):", "s def parse(code): \"\"\" Parse a GLSL source code into an abstract syntax", "base.strip() self.size = size.strip() self.storage = storage.strip() self.precision = precision.strip() def __str__(self): s", "parameter.size[0] P = Parameter(type = Type(base = parameter.type, storage = parameter.storage, precision =", "| INT_OCTAL | INT_DECIMAL FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN = Literal(\"(\").suppress(), Literal(\")\").suppress() LBRACK,", "Prototype(type = T, name = token.name, parameters = parameters) prototypes.append(P) for parameter in", "class Prototype(object): def __init__(self, type, name, parameters): self.type = Type(type) self.name = name.strip()", "\"class union enum typedef template this packed\" \"goto switch default\" \"inline noinline volatile", "= code.strip() def __str__(self): s = str(self.type) + \" %s (\" % self.alias", "return s class Constant(object): def 
__init__(self, name, value): self.name = name.strip() self.alias =", "self.content) return s def parse(code): \"\"\" Parse a GLSL source code into an", "more info. # ----------------------------------------------------------------------------- from pyparsing import * keywords = (\"attribute const uniform", "from pyparsing import * keywords = (\"attribute const uniform varying break continue do", "# Struct definitions & declarations for (token, start, end) in STRUCT.scanString(code): S =", "Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER = Literal(\"const\") INVARIANT_QUALIFIER = Literal(\"invariant\") PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[", "self.value == other.value class Struct(object): def __init__(self, name, content): self.name = name.strip() self.content", "parameter.size), name = parameter.name, inout = parameter.inout) parameters.append(P) T = Type(base = token.type,", "+ \" \" + self.alias if self.type.size: s += \"[%s]\" % self.type.size if", "= [] prototypes= [] functions = [] # Constants for (token, start, end)", "%s (\" % self.alias for i, parameter in enumerate(self.parameters): s += str(parameter) if", "def __str__(self): s = \"#define %s %s\" % (self.alias, self.value) return s def", "+= \");\" return s class Function(object): def __init__(self, type, name, parameters, code): self.type", "self.name = name.strip() self.content = content.strip() def __str__(self): s = \"struct %s %s;\"", "LICENSE.txt for more info. 
# ----------------------------------------------------------------------------- from pyparsing import * keywords = (\"attribute", "= INT_HEXADECIMAL | INT_OCTAL | INT_DECIMAL FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN = Literal(\"(\").suppress(),", "\"%s \" % self.inout s += str(self.type) + \" \" if self.name: s", "Constant(object): def __init__(self, name, value): self.name = name.strip() self.alias = name.strip() self.value =", "switch default\" \"inline noinline volatile public static extern external\" \"interface flat long short", "keywords = (\"attribute const uniform varying break continue do for while\" \"if else\"", "\"input output\" \"hvec2 hvec3 hvec4 dvec2 dvec3 dvec4 fvec2 fvec3 fvec4 sampler1D sampler3D\"", "| SEMICOLON)) FUNCTION.ignore(cStyleComment) # Struct definitions & declarations # --------------------------------- STRUCT = (", "Function prototypes # ------------------- FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + IDENTIFIER(\"name\")", "= str(self.type) + \" %s (\" % self.alias for i, parameter in enumerate(self.parameters):", "= [] structs = [] variables = [] prototypes= [] functions = []", "IDENTIFIER | INTEGER | FLOAT | OPERATOR EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE =", "storage = token.storage, precision = token.precision, size = size), name = variable.name, value", "self.value = value.strip() def __str__(self): s = \"#define %s %s\" % (self.alias, self.value)", "type, name=None, inout=\"in\"): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.inout", "name.strip() self.alias = name.strip() self.value = value.strip() def __str__(self): s = str(self.type) +", "name, value=None): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.value =", "and self.size == other.size and self.precision == 
other.precision) class Parameter(object): def __init__(self, type,", "+ IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\")) + Optional(LBRACK + SIZE + RBRACK)(\"size\")) # Function prototypes", "variable in token.variables: size = '' if not variable.size else variable.size[0] value =", "variable.size else variable.size[0] value = '' if not variable.value else variable.value[0] V =", "\"float int void bool true false\" \"lowp mediump highp precision invariant\" \"discard return\"", "INTEGER = INT_HEXADECIMAL | INT_OCTAL | INT_DECIMAL FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN =", "syntax list \"\"\" constants = [] structs = [] variables = [] prototypes=", "double half fixed unsigned superp\" \"input output\" \"hvec2 hvec3 hvec4 dvec2 dvec3 dvec4", "nestedExpr() | nestedExpr('{','}') | IDENTIFIER | INTEGER | FLOAT | OPERATOR EXPR =", "Type(type) self.name = name.strip() self.alias = name.strip() self.parameters = parameters def __str__(self): s", "self.size = other.size self.storage = other.storage self.precision = other.precision else: self.base = base.strip()", "OPERATOR = oneOf(\"+ - * / [ ] . & ^ ! 
{", "Literal(\"define\").suppress() + IDENTIFIER(\"name\") + restOfLine(\"value\")) class Type(object): def __init__(self, base=None, storage=None, precision=None, size=None):", "self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.parameters = parameters def", "+ Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + IDENTIFIER(\"name\") + LPAREN + Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN +", "samplerCube\" \"struct\") reserved = (\"asm\" \"class union enum typedef template this packed\" \"goto", "= token.storage, precision = token.precision, size = token.size) if token.code: F = Function(", "INVARIANT_QUALIFIER = Literal(\"invariant\") PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[ \\t\\n]\") # Variable declarations", "# --------- CONSTANT = (Literal(\"#\").suppress() + Literal(\"define\").suppress() + IDENTIFIER(\"name\") + restOfLine(\"value\")) class Type(object):", "size=None): if isinstance(base, Type): other = base self.base = other.base self.size = other.size", "[] functions = [] # Constants for (token, start, end) in CONSTANT.scanString(code): C", "(\"asm\" \"class union enum typedef template this packed\" \"goto switch default\" \"inline noinline", "= token.type, storage = token.storage, precision = token.precision, size = size), name =", "parameter.inout) parameters.append(P) T = Type(base = token.type, storage = token.storage, precision = token.precision,", "= nestedExpr() | nestedExpr('{','}') | IDENTIFIER | INTEGER | FLOAT | OPERATOR EXPR", "class Function(object): def __init__(self, type, name, parameters, code): self.type = Type(type) self.name =", "for variable in token.variables: size = '' if not variable.size else variable.size[0] value", "= '' if not variable.value else variable.value[0] V = Variable(Type(base = token.type, storage", "bvec4 sampler2D samplerCube\" \"struct\") reserved = (\"asm\" \"class union enum typedef template 
this", "structs = [] variables = [] prototypes= [] functions = [] # Constants", "output\" \"hvec2 hvec3 hvec4 dvec2 dvec3 dvec4 fvec2 fvec3 fvec4 sampler1D sampler3D\" \"sampler1DShadow", "content.strip() def __str__(self): s = \"struct %s %s;\" % (self.name, self.content) return s", "and self.precision == other.precision) class Parameter(object): def __init__(self, type, name=None, inout=\"in\"): self.type =", "def __str__(self): s = \"struct %s %s;\" % (self.name, self.content) return s def", "size = size), name = variable.name, value = value) variables.append(V) # Functions prototype", "i < len(self.parameters)-1: s+= \", \" s += \");\" return s class Function(object):", "fixed unsigned superp\" \"input output\" \"hvec2 hvec3 hvec4 dvec2 dvec3 dvec4 fvec2 fvec3", "F else: P = Prototype(type = T, name = token.name, parameters = parameters)", "value = value) variables.append(V) # Struct definitions & declarations for (token, start, end)", "Optional(LBRACK + SIZE + RBRACK)(\"size\")) # Function prototypes # ------------------- FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\")", "(Literal(\"#\").suppress() + Literal(\"define\").suppress() + IDENTIFIER(\"name\") + restOfLine(\"value\")) class Type(object): def __init__(self, base=None, storage=None,", "type, name, parameters, code): self.type = Type(type) self.name = name.strip() self.alias = name.strip()", "s += str(parameter) if i < len(self.parameters)-1: s+= \", \" s += \")", "s += self.code return s class Constant(object): def __init__(self, name, value): self.name =", "token.content[0]) structs.append(S) for variable in token.variables: size = '' if not variable.size else", "Optional(IDENTIFIER(\"name\")) + Optional(LBRACK + SIZE + RBRACK)(\"size\")) # Function prototypes # ------------------- FUNCTION", "return (self.base == other.base and self.size == other.size and self.precision == other.precision) class", "= name.strip() self.inout = inout.strip() def __str__(self): s = \"\" if self.inout: s", 
"token.code: F = Function( type = T, name = token.name, parameters = parameters,", "= (\"attribute const uniform varying break continue do for while\" \"if else\" \"in", "varying break continue do for while\" \"if else\" \"in out inout\" \"float int", "self.precision == other.precision) class Parameter(object): def __init__(self, type, name=None, inout=\"in\"): self.type = Type(type)", "% (self.alias, self.value) return s def __eq__(self, other): return self.value == other.value class", "= other.storage self.precision = other.precision else: self.base = base.strip() self.size = size.strip() self.storage", "uniform varying break continue do for while\" \"if else\" \"in out inout\" \"float", "type = T, name = token.name, parameters = parameters, code = token.code[0]) functions.append(F)", "self.name = name.strip() self.alias = name.strip() self.value = value.strip() def __str__(self): s =", "= parameter.type, storage = parameter.storage, precision = parameter.precision, size = parameter.size), name =", "for (token, start, end) in STRUCT.scanString(code): S = Struct(name = token.type, content =", "if i < len(self.parameters)-1: s+= \", \" s += \");\" return s class", "into an abstract syntax list \"\"\" constants = [] structs = [] variables", "self.value s += \";\" return s class Prototype(object): def __init__(self, type, name, parameters):", "if self.inout: s += \"%s \" % self.inout s += str(self.type) + \"", "delim=Empty()).setParseAction(keepOriginalText) VARIABLE = (IDENTIFIER(\"name\") + Optional(LBRACK + SIZE + RBRACK)(\"size\") + Optional(EQUAL +", "\"%s \" % self.storage if self.precision: s += \"%s \" % self.precision s", "under the (new) BSD License. See LICENSE.txt for more info. 
# ----------------------------------------------------------------------------- from", "%s;\" % (self.name, self.content) return s def parse(code): \"\"\" Parse a GLSL source", "parameter.size else parameter.size[0] P = Parameter(type = Type(base = parameter.type, storage = parameter.storage,", "name.strip() self.parameters = parameters def __str__(self): s = str(self.type) + \" %s (\"", "__str__(self): s = str(self.type) + \" \" + self.alias if self.type.size: s +=", "structs.append(S) for variable in token.variables: size = '' if not variable.size else variable.size[0]", "+= \") \" s += self.code return s class Constant(object): def __init__(self, name,", "self.value = value.strip() def __str__(self): s = str(self.type) + \" \" + self.alias", "Type(type) self.name = name.strip() self.alias = name.strip() self.parameters = parameters self.code = code.strip()", "RBRACK = Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE, RBRACE = Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON, COMMA = Literal(\";\").suppress(),", "SEMICOLON, COMMA = Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL = Literal(\"=\").suppress() SIZE = INTEGER | IDENTIFIER", "+ IDENTIFIER(\"name\") + restOfLine(\"value\")) class Type(object): def __init__(self, base=None, storage=None, precision=None, size=None): if", "self.inout: s += \"%s \" % self.inout s += str(self.type) + \" \"", "= [] variables = [] prototypes= [] functions = [] # Constants for", "(token, start, end) in STRUCT.scanString(code): S = Struct(name = token.type, content = token.content[0])", "hvec4 dvec2 dvec3 dvec4 fvec2 fvec3 fvec4 sampler1D sampler3D\" \"sampler1DShadow sampler2DShadow\" \"sampler2DRect sampler3DRect", "self.storage = storage.strip() self.precision = precision.strip() def __str__(self): s = \"\" if self.storage:", "Function( type = T, name = token.name, parameters = parameters, code = token.code[0])", "Regex('(0[xX][0-9a-fA-F]*)') INTEGER = INT_HEXADECIMAL | INT_OCTAL | 
INT_DECIMAL FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN", "token.name, value = token.value) constants.append(C) # Variables for (token, start, end) in DECLARATION.scanString(code):", "+ RBRACK)(\"size\") + Optional(EQUAL + EXPR)(\"value\")) VARIABLES = delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION = (STORAGE_QUALIFIER(\"storage\") +", "# Variables for (token, start, end) in DECLARATION.scanString(code): for variable in token.variables: size", "= token.name, parameters = parameters) prototypes.append(P) for parameter in parameters: parameter.function = None", "+ restOfLine(\"value\")) class Type(object): def __init__(self, base=None, storage=None, precision=None, size=None): if isinstance(base, Type):", "parameter.function = F else: P = Prototype(type = T, name = token.name, parameters", "in CONSTANT.scanString(code): C = Constant(name = token.name, value = token.value) constants.append(C) # Variables", "__init__(self, type, name=None, inout=\"in\"): self.type = Type(type) self.name = name.strip() self.alias = name.strip()", "= Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL = Regex('(0[0-7]*)') INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)') INTEGER =", "= Prototype(type = T, name = token.name, parameters = parameters) prototypes.append(P) for parameter", "parameters) prototypes.append(P) for parameter in parameters: parameter.function = None return constants, structs, variables,", "end) in DECLARATION.scanString(code): for variable in token.variables: size = '' if not variable.size", "= other.precision else: self.base = base.strip() self.size = size.strip() self.storage = storage.strip() self.precision", "+= \";\" return s class Prototype(object): def __init__(self, type, name, parameters): self.type =", "name.strip() self.inout = inout.strip() def __str__(self): s = \"\" if self.inout: s +=", "prototypes # 
------------------- FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + IDENTIFIER(\"name\") +", "S = Struct(name = token.type, content = token.content[0]) structs.append(S) for variable in token.variables:", "precision=None, size=None): if isinstance(base, Type): other = base self.base = other.base self.size =", "= name.strip() self.parameters = parameters def __str__(self): s = str(self.type) + \" %s", "def __init__(self, base=None, storage=None, precision=None, size=None): if isinstance(base, Type): other = base self.base", "static extern external\" \"interface flat long short double half fixed unsigned superp\" \"input", "self.alias for i, parameter in enumerate(self.parameters): s += str(parameter) if i < len(self.parameters)-1:", "Literal(\")\").suppress() LBRACK, RBRACK = Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE, RBRACE = Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON, COMMA", "Optional(EQUAL + EXPR)(\"value\")) VARIABLES = delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION = (STORAGE_QUALIFIER(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\")", "CONSTANT = (Literal(\"#\").suppress() + Literal(\"define\").suppress() + IDENTIFIER(\"name\") + restOfLine(\"value\")) class Type(object): def __init__(self,", "%s %s\" % (self.alias, self.value) return s def __eq__(self, other): return self.value ==", "\"discard return\" \"mat2 mat3 mat4\" \"vec2 vec3 vec4 ivec2 ivec3 ivec4 bvec2 bvec3", "= (IDENTIFIER(\"name\") + Optional(LBRACK + SIZE + RBRACK)(\"size\") + Optional(EQUAL + EXPR)(\"value\")) VARIABLES", "= (\"asm\" \"class union enum typedef template this packed\" \"goto switch default\" \"inline", "| INT_DECIMAL FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN = Literal(\"(\").suppress(), Literal(\")\").suppress() LBRACK, RBRACK =", 
"name.strip() self.content = content.strip() def __str__(self): s = \"struct %s %s;\" % (self.name,", "__eq__(self, other): return (self.base == other.base and self.size == other.size and self.precision ==", "mat4\" \"vec2 vec3 vec4 ivec2 ivec3 ivec4 bvec2 bvec3 bvec4 sampler2D samplerCube\" \"struct\")", "s += \"%s \" % self.inout s += str(self.type) + \" \" if", "= F else: P = Prototype(type = T, name = token.name, parameters =", "else variable.value[0] V = Variable(Type(base = token.type, storage = token.storage, precision = token.precision,", "in STRUCT.scanString(code): S = Struct(name = token.type, content = token.content[0]) structs.append(S) for variable", "\"\" if self.inout: s += \"%s \" % self.inout s += str(self.type) +", "name.strip() self.alias = name.strip() self.parameters = parameters def __str__(self): s = str(self.type) +", "for parameter in parameters: parameter.function = F else: P = Prototype(type = T,", "size = size), name = variable.name, value = value) variables.append(V) # Struct definitions", "= value) variables.append(V) # Struct definitions & declarations for (token, start, end) in", "false\" \"lowp mediump highp precision invariant\" \"discard return\" \"mat2 mat3 mat4\" \"vec2 vec3", "# Variable declarations # --------------------- PART = nestedExpr() | nestedExpr('{','}') | IDENTIFIER |", "# Distributed under the (new) BSD License. See LICENSE.txt for more info. 
#", "self.base = other.base self.size = other.size self.storage = other.storage self.precision = other.precision else:", "inout=\"in\"): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.inout = inout.strip()", "self.alias = name.strip() self.inout = inout.strip() def __str__(self): s = \"\" if self.inout:", "sampler1D sampler3D\" \"sampler1DShadow sampler2DShadow\" \"sampler2DRect sampler3DRect sampler2DRectShadow\" \"sizeof cast\" \"namespace using\") IDENTIFIER =", "& declarations for (token, start, end) in STRUCT.scanString(code): S = Struct(name = token.type,", "= parameters) prototypes.append(P) for parameter in parameters: parameter.function = None return constants, structs,", "for (token, start, end) in CONSTANT.scanString(code): C = Constant(name = token.name, value =", "else: self.base = base.strip() self.size = size.strip() self.storage = storage.strip() self.precision = precision.strip()", "do for while\" \"if else\" \"in out inout\" \"float int void bool true", "\"\" if self.storage: s += \"%s \" % self.storage if self.precision: s +=", "type, name, parameters): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.parameters", "= parameter.inout) parameters.append(P) T = Type(base = token.type, storage = token.storage, precision =", "bvec2 bvec3 bvec4 sampler2D samplerCube\" \"struct\") reserved = (\"asm\" \"class union enum typedef", "T, name = token.name, parameters = parameters, code = token.code[0]) functions.append(F) for parameter", "EQUAL = Literal(\"=\").suppress() SIZE = INTEGER | IDENTIFIER OPERATOR = oneOf(\"+ - *", "Variable(Type(base = token.type, size = size), name = variable.name, value = value) variables.append(V)", "= name.strip() self.parameters = parameters self.code = code.strip() def __str__(self): s = str(self.type)", "token.storage, precision = token.precision, size = size), name = variable.name, value = value)", "\"\"\" constants = [] structs = [] variables = [] prototypes= 
[] functions", "% self.base return s def __eq__(self, other): return (self.base == other.base and self.size", "s class Prototype(object): def __init__(self, type, name, parameters): self.type = Type(type) self.name =", "\"sampler2DRect sampler3DRect sampler2DRectShadow\" \"sizeof cast\" \"namespace using\") IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))')", "(self.alias, self.value) return s def __eq__(self, other): return self.value == other.value class Struct(object):", "(STORAGE_QUALIFIER(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + VARIABLES + SEMICOLON) DECLARATION.ignore(cStyleComment) # Function parameter", "name.strip() self.parameters = parameters self.code = code.strip() def __str__(self): s = str(self.type) +", "variable.name, value = value) variables.append(V) # Struct definitions & declarations for (token, start,", "Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN = Literal(\"(\").suppress(), Literal(\")\").suppress() LBRACK, RBRACK = Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE, RBRACE", "Optional(LBRACK + SIZE + RBRACK)(\"size\") + Optional(EQUAL + EXPR)(\"value\")) VARIABLES = delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION", "s += \") \" s += self.code return s class Constant(object): def __init__(self,", "= precision.strip() def __str__(self): s = \"\" if self.storage: s += \"%s \"", "__str__(self): s = \"\" if self.storage: s += \"%s \" % self.storage if", "__init__(self, type, name, parameters): self.type = Type(type) self.name = name.strip() self.alias = name.strip()", "void bool true false\" \"lowp mediump highp precision invariant\" \"discard return\" \"mat2 mat3", "vec3 vec4 ivec2 ivec3 ivec4 bvec2 bvec3 bvec4 sampler2D samplerCube\" \"struct\") reserved =", "class Variable(object): def __init__(self, type, name, value=None): self.type = Type(type) self.name 
= name.strip()", "prototypes= [] functions = [] # Constants for (token, start, end) in CONSTANT.scanString(code):", "self.name = name.strip() self.alias = name.strip() self.parameters = parameters def __str__(self): s =", "volatile public static extern external\" \"interface flat long short double half fixed unsigned", "= content.strip() def __str__(self): s = \"struct %s %s;\" % (self.name, self.content) return", "INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)') INTEGER = INT_HEXADECIMAL | INT_OCTAL | INT_DECIMAL FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)')", "= Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[ \\t\\n]\") # Variable declarations # --------------------- PART =", "ivec4 bvec2 bvec3 bvec4 sampler2D samplerCube\" \"struct\") reserved = (\"asm\" \"class union enum", "s += \"%s \" % self.precision s += \"%s\" % self.base return s", "= token.storage, precision = token.precision, size = size), name = variable.name, value =", "= token.code[0]) functions.append(F) for parameter in parameters: parameter.function = F else: P =", "Constants for (token, start, end) in CONSTANT.scanString(code): C = Constant(name = token.name, value", "| IDENTIFIER | INTEGER | FLOAT | OPERATOR EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE", "+= \"[%s]\" % self.size return s class Variable(object): def __init__(self, type, name, value=None):", "parameter.type, storage = parameter.storage, precision = parameter.precision, size = parameter.size), name = parameter.name,", "definitions & declarations for (token, start, end) in STRUCT.scanString(code): S = Struct(name =", "self.storage: s += \"%s \" % self.storage if self.precision: s += \"%s \"", "size = '' if not variable.size else variable.size[0] value = '' if not", "| IDENTIFIER OPERATOR = oneOf(\"+ - * / [ ] . 
& ^", "= Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE, RBRACE = Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON, COMMA = Literal(\";\").suppress(), Literal(\",\").suppress()", "EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE = (IDENTIFIER(\"name\") + Optional(LBRACK + SIZE + RBRACK)(\"size\")", "+ nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES) + SEMICOLON) STRUCT.ignore(cStyleComment) # Constants # --------- CONSTANT", "functions = [] # Constants for (token, start, end) in CONSTANT.scanString(code): C =", "else variable.value[0] V = Variable(Type(base = token.type, size = size), name = variable.name,", "name = variable.name, value = value) variables.append(V) # Struct definitions & declarations for", "= Regex(\"(in|out|inout)[ \\t\\n]\") # Variable declarations # --------------------- PART = nestedExpr() | nestedExpr('{','}')", "if self.value: s += \" = %s\" % self.value s += \";\" return", "using\") IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL = Regex('(0[0-7]*)') INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)')", "+ Literal(\"define\").suppress() + IDENTIFIER(\"name\") + restOfLine(\"value\")) class Type(object): def __init__(self, base=None, storage=None, precision=None,", "[] for parameter in token.parameters: size = '' if not parameter.size else parameter.size[0]", "self.type.size: s += \"[%s]\" % self.size return s class Variable(object): def __init__(self, type,", "s = str(self.type) + \" %s (\" % self.alias for i, parameter in", "base=None, storage=None, precision=None, size=None): if isinstance(base, Type): other = base self.base = other.base", "variables = [] prototypes= [] functions = [] # Constants for (token, start,", "self.base = base.strip() self.size = size.strip() self.storage = storage.strip() self.precision = precision.strip() def", "reserved = (\"asm\" \"class union enum typedef 
template this packed\" \"goto switch default\"", "PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[ \\t\\n]\") # Variable declarations # --------------------- PART = nestedExpr() |", "__str__(self): s = str(self.type) + \" %s (\" % self.alias for i, parameter", "parameters = [] for parameter in token.parameters: size = '' if not parameter.size", "parameters = parameters, code = token.code[0]) functions.append(F) for parameter in parameters: parameter.function =", "self.precision: s += \"%s \" % self.precision s += \"%s\" % self.base return", "__str__(self): s = \"\" if self.inout: s += \"%s \" % self.inout s", "\"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON)) FUNCTION.ignore(cStyleComment) # Struct definitions & declarations # --------------------------------- STRUCT =", "= INTEGER | IDENTIFIER OPERATOR = oneOf(\"+ - * / [ ] .", "def __init__(self, name, value): self.name = name.strip() self.alias = name.strip() self.value = value.strip()", "% self.value s += \";\" return s class Prototype(object): def __init__(self, type, name,", "else parameter.size[0] P = Parameter(type = Type(base = parameter.type, storage = parameter.storage, precision", "self.name = name.strip() self.alias = name.strip() self.parameters = parameters self.code = code.strip() def", "variable.value[0] V = Variable(Type(base = token.type, storage = token.storage, precision = token.precision, size", "= Type(type) self.name = name.strip() self.alias = name.strip() self.parameters = parameters self.code =", "= ( Literal(\"struct\").suppress() + IDENTIFIER(\"type\") + nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES) + SEMICOLON) STRUCT.ignore(cStyleComment)", "STRUCT.scanString(code): S = Struct(name = token.type, content = token.content[0]) structs.append(S) for variable in", "\" if self.name: s += \"%s\" % self.name if self.type.size: s += \"[%s]\"", "\" % self.storage if self.precision: s += \"%s \" % self.precision s +=", "token.type, size = 
size), name = variable.name, value = value) variables.append(V) # Functions", "self.precision s += \"%s\" % self.base return s def __eq__(self, other): return (self.base", "= name.strip() self.value = value.strip() def __str__(self): s = str(self.type) + \" \"", "in FUNCTION.scanString(code): parameters = [] for parameter in token.parameters: size = '' if", "\"hvec2 hvec3 hvec4 dvec2 dvec3 dvec4 fvec2 fvec3 fvec4 sampler1D sampler3D\" \"sampler1DShadow sampler2DShadow\"", "+ RBRACK)(\"size\")) # Function prototypes # ------------------- FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") +", "class Type(object): def __init__(self, base=None, storage=None, precision=None, size=None): if isinstance(base, Type): other =", "token.type, storage = token.storage, precision = token.precision, size = size), name = variable.name,", "+= \"[%s]\" % self.type.size if self.value: s += \" = %s\" % self.value", "an abstract syntax list \"\"\" constants = [] structs = [] variables =", "if i < len(self.parameters)-1: s+= \", \" s += \") \" s +=", "%s\" % (self.alias, self.value) return s def __eq__(self, other): return self.value == other.value", "fvec4 sampler1D sampler3D\" \"sampler1DShadow sampler2DShadow\" \"sampler2DRect sampler3DRect sampler2DRectShadow\" \"sizeof cast\" \"namespace using\") IDENTIFIER", "Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL = Literal(\"=\").suppress() SIZE = INTEGER | IDENTIFIER OPERATOR = oneOf(\"+", "FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + IDENTIFIER(\"name\") + LPAREN + Optional(delimitedList(PARAMETER))(\"parameters\")", "variable.value[0] V = Variable(Type(base = token.type, size = size), name = variable.name, value", "<NAME> # Distributed under the (new) BSD License. 
See LICENSE.txt for more info.", "const uniform varying break continue do for while\" \"if else\" \"in out inout\"", "str(self.type) + \" %s (\" % self.alias for i, parameter in enumerate(self.parameters): s", "+ SEMICOLON) DECLARATION.ignore(cStyleComment) # Function parameter # ------------------ PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\")", "declarations # --------------------- PART = nestedExpr() | nestedExpr('{','}') | IDENTIFIER | INTEGER |", "| FLOAT | OPERATOR EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE = (IDENTIFIER(\"name\") + Optional(LBRACK", "= Literal(\"const\") INVARIANT_QUALIFIER = Literal(\"invariant\") PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[ \\t\\n]\") #", "+= \"%s \" % self.storage if self.precision: s += \"%s \" % self.precision", "if self.precision: s += \"%s \" % self.precision s += \"%s\" % self.base", "VARIABLES + SEMICOLON) DECLARATION.ignore(cStyleComment) # Function parameter # ------------------ PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\") +", "s+= \", \" s += \") \" s += self.code return s class", "pyparsing import * keywords = (\"attribute const uniform varying break continue do for", "= token.precision, size = token.size) if token.code: F = Function( type = T,", "cast\" \"namespace using\") IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL = Regex('(0[0-7]*)') INT_HEXADECIMAL", "# Constants # --------- CONSTANT = (Literal(\"#\").suppress() + Literal(\"define\").suppress() + IDENTIFIER(\"name\") + restOfLine(\"value\"))", "= base.strip() self.size = size.strip() self.storage = storage.strip() self.precision = precision.strip() def __str__(self):", "\" % self.precision s += \"%s\" % self.base return s def __eq__(self, other):", "self.value) return s def __eq__(self, other): return self.value == other.value class 
Struct(object): def", "size = parameter.size), name = parameter.name, inout = parameter.inout) parameters.append(P) T = Type(base", "constants = [] structs = [] variables = [] prototypes= [] functions =", "\"interface flat long short double half fixed unsigned superp\" \"input output\" \"hvec2 hvec3", "if token.code: F = Function( type = T, name = token.name, parameters =", "= \"\" if self.storage: s += \"%s \" % self.storage if self.precision: s", "type, name, value=None): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.value", "typedef template this packed\" \"goto switch default\" \"inline noinline volatile public static extern", "+= \" = %s\" % self.value s += \";\" return s class Prototype(object):", "value.strip() def __str__(self): s = \"#define %s %s\" % (self.alias, self.value) return s", "\\t\\n]\") # Variable declarations # --------------------- PART = nestedExpr() | nestedExpr('{','}') | IDENTIFIER", "INT_DECIMAL FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN = Literal(\"(\").suppress(), Literal(\")\").suppress() LBRACK, RBRACK = Literal(\"[\").suppress(),", "other.storage self.precision = other.precision else: self.base = base.strip() self.size = size.strip() self.storage =", "if self.type.size: s += \"[%s]\" % self.size return s class Variable(object): def __init__(self,", "declarations for (token, start, end) in STRUCT.scanString(code): S = Struct(name = token.type, content", "Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[ \\t\\n]\") # Variable declarations # --------------------- PART = nestedExpr()", "= inout.strip() def __str__(self): s = \"\" if self.inout: s += \"%s \"", "else variable.size[0] value = '' if not variable.value else variable.value[0] V = Variable(Type(base", "name = token.name, parameters = parameters, code = token.code[0]) functions.append(F) for parameter in", "content = token.content[0]) structs.append(S) for 
variable in token.variables: size = '' if not", "= oneOf(\"+ - * / [ ] . & ^ ! { }\")", "\" s += \");\" return s class Function(object): def __init__(self, type, name, parameters,", "| nestedExpr('{','}') | IDENTIFIER | INTEGER | FLOAT | OPERATOR EXPR = delimitedList(PART,", "P = Parameter(type = Type(base = parameter.type, storage = parameter.storage, precision = parameter.precision,", "IDENTIFIER(\"type\") + nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES) + SEMICOLON) STRUCT.ignore(cStyleComment) # Constants # ---------", "= (STORAGE_QUALIFIER(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + VARIABLES + SEMICOLON) DECLARATION.ignore(cStyleComment) # Function", "\" = %s\" % self.value s += \";\" return s class Prototype(object): def", "%s\" % self.value s += \";\" return s class Prototype(object): def __init__(self, type,", "other.base self.size = other.size self.storage = other.storage self.precision = other.precision else: self.base =", "+= str(parameter) if i < len(self.parameters)-1: s+= \", \" s += \");\" return", "- * / [ ] . & ^ ! 
{ }\") STORAGE_QUALIFIER =", "token.variables: size = '' if not variable.size else variable.size[0] value = '' if", "short double half fixed unsigned superp\" \"input output\" \"hvec2 hvec3 hvec4 dvec2 dvec3", "self.parameters = parameters def __str__(self): s = str(self.type) + \" %s (\" %", "name, value): self.name = name.strip() self.alias = name.strip() self.value = value.strip() def __str__(self):", "self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.parameters = parameters self.code", "(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + IDENTIFIER(\"name\") + LPAREN + Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN", "= other.base self.size = other.size self.storage = other.storage self.precision = other.precision else: self.base", "# ------------------- FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + IDENTIFIER(\"name\") + LPAREN", "highp precision invariant\" \"discard return\" \"mat2 mat3 mat4\" \"vec2 vec3 vec4 ivec2 ivec3", "not parameter.size else parameter.size[0] P = Parameter(type = Type(base = parameter.type, storage =", "else: P = Prototype(type = T, name = token.name, parameters = parameters) prototypes.append(P)", "len(self.parameters)-1: s+= \", \" s += \") \" s += self.code return s", "self.alias = name.strip() self.value = value.strip() def __str__(self): s = str(self.type) + \"", "= Type(base = token.type, storage = token.storage, precision = token.precision, size = token.size)", "& ^ ! 
{ }\") STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER = Literal(\"const\") INVARIANT_QUALIFIER =", "= delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE = (IDENTIFIER(\"name\") + Optional(LBRACK + SIZE + RBRACK)(\"size\") +", "return s def __eq__(self, other): return (self.base == other.base and self.size == other.size", "utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2015, <NAME> # Distributed under the", "__init__(self, base=None, storage=None, precision=None, size=None): if isinstance(base, Type): other = base self.base =", "delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION = (STORAGE_QUALIFIER(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + VARIABLES + SEMICOLON) DECLARATION.ignore(cStyleComment)", "if self.name: s += \"%s\" % self.name if self.type.size: s += \"[%s]\" %", "not variable.value else variable.value[0] V = Variable(Type(base = token.type, storage = token.storage, precision", "enum typedef template this packed\" \"goto switch default\" \"inline noinline volatile public static", "s += \");\" return s class Function(object): def __init__(self, type, name, parameters, code):", "inout = parameter.inout) parameters.append(P) T = Type(base = token.type, storage = token.storage, precision", "= name.strip() self.alias = name.strip() self.value = value.strip() def __str__(self): s = str(self.type)", "< len(self.parameters)-1: s+= \", \" s += \") \" s += self.code return", "FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN = Literal(\"(\").suppress(), Literal(\")\").suppress() LBRACK, RBRACK = Literal(\"[\").suppress(), Literal(\"]\").suppress()", "# Functions prototype and definitions for (token, start, end) in FUNCTION.scanString(code): parameters =", "def __init__(self, type, name, value=None): self.type = 
Type(type) self.name = name.strip() self.alias =", "out inout\" \"float int void bool true false\" \"lowp mediump highp precision invariant\"", "s = \"\" if self.inout: s += \"%s \" % self.inout s +=", "vec4 ivec2 ivec3 ivec4 bvec2 bvec3 bvec4 sampler2D samplerCube\" \"struct\") reserved = (\"asm\"", "\" + self.alias if self.type.size: s += \"[%s]\" % self.type.size if self.value: s", "str(parameter) if i < len(self.parameters)-1: s+= \", \" s += \");\" return s", "name.strip() self.value = value.strip() def __str__(self): s = \"#define %s %s\" % (self.alias,", "Distributed under the (new) BSD License. See LICENSE.txt for more info. # -----------------------------------------------------------------------------", "(\"attribute const uniform varying break continue do for while\" \"if else\" \"in out", "{ }\") STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER = Literal(\"const\") INVARIANT_QUALIFIER = Literal(\"invariant\") PRECISION_QUALIFIER =", "PART = nestedExpr() | nestedExpr('{','}') | IDENTIFIER | INTEGER | FLOAT | OPERATOR", "= Type(type) self.name = name.strip() self.alias = name.strip() self.inout = inout.strip() def __str__(self):", "== other.base and self.size == other.size and self.precision == other.precision) class Parameter(object): def", "'' if not variable.value else variable.value[0] V = Variable(Type(base = token.type, size =", "----------------------------------------------------------------------------- # Copyright (c) 2015, <NAME> # Distributed under the (new) BSD License.", "= %s\" % self.value s += \";\" return s class Prototype(object): def __init__(self,", "+ Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\")) + Optional(LBRACK + SIZE + RBRACK)(\"size\")) #", "] . & ^ ! 
{ }\") STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER = Literal(\"const\")", "= Parameter(type = Type(base = parameter.type, storage = parameter.storage, precision = parameter.precision, size", "value.strip() def __str__(self): s = str(self.type) + \" \" + self.alias if self.type.size:", "self.parameters = parameters self.code = code.strip() def __str__(self): s = str(self.type) + \"", "\" \" if self.name: s += \"%s\" % self.name if self.type.size: s +=", "LPAREN + Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN + ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON)) FUNCTION.ignore(cStyleComment) # Struct", "self.alias = name.strip() self.value = value.strip() def __str__(self): s = \"#define %s %s\"", "Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\")) + Optional(LBRACK + SIZE + RBRACK)(\"size\")) # Function", "Struct definitions & declarations # --------------------------------- STRUCT = ( Literal(\"struct\").suppress() + IDENTIFIER(\"type\") +", "+= \"%s \" % self.precision s += \"%s\" % self.base return s def", "name.strip() self.value = value.strip() def __str__(self): s = str(self.type) + \" \" +", "variables.append(V) # Struct definitions & declarations for (token, start, end) in STRUCT.scanString(code): S", "== other.precision) class Parameter(object): def __init__(self, type, name=None, inout=\"in\"): self.type = Type(type) self.name", "(c) 2015, <NAME> # Distributed under the (new) BSD License. 
See LICENSE.txt for", "inout\" \"float int void bool true false\" \"lowp mediump highp precision invariant\" \"discard", "nestedExpr('{','}') | IDENTIFIER | INTEGER | FLOAT | OPERATOR EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText)", "% self.inout s += str(self.type) + \" \" if self.name: s += \"%s\"", "return s def __eq__(self, other): return self.value == other.value class Struct(object): def __init__(self,", "Type(type) self.name = name.strip() self.alias = name.strip() self.value = value.strip() def __str__(self): s", "+ EXPR)(\"value\")) VARIABLES = delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION = (STORAGE_QUALIFIER(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") +", "+ Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\")) + Optional(LBRACK + SIZE +", "other): return self.value == other.value class Struct(object): def __init__(self, name, content): self.name =", "= token.type, size = size), name = variable.name, value = value) variables.append(V) #", "definitions & declarations # --------------------------------- STRUCT = ( Literal(\"struct\").suppress() + IDENTIFIER(\"type\") + nestedExpr(\"{\",", "start, end) in CONSTANT.scanString(code): C = Constant(name = token.name, value = token.value) constants.append(C)", "prototype and definitions for (token, start, end) in FUNCTION.scanString(code): parameters = [] for", "fvec3 fvec4 sampler1D sampler3D\" \"sampler1DShadow sampler2DShadow\" \"sampler2DRect sampler3DRect sampler2DRectShadow\" \"sizeof cast\" \"namespace using\")", "+ IDENTIFIER(\"type\") + IDENTIFIER(\"name\") + LPAREN + Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN + ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\"))", "+ Optional(LBRACK + SIZE + RBRACK)(\"size\") + Optional(EQUAL + EXPR)(\"value\")) VARIABLES = 
delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True))", "= name.strip() self.content = content.strip() def __str__(self): s = \"struct %s %s;\" %", "Struct(object): def __init__(self, name, content): self.name = name.strip() self.content = content.strip() def __str__(self):", "s+= \", \" s += \");\" return s class Function(object): def __init__(self, type,", "| OPERATOR EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE = (IDENTIFIER(\"name\") + Optional(LBRACK + SIZE", "+ IDENTIFIER(\"name\") + LPAREN + Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN + ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON))", "Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON, COMMA = Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL = Literal(\"=\").suppress() SIZE = INTEGER", "precision = token.precision, size = size), name = variable.name, value = value) variables.append(V)", "list \"\"\" constants = [] structs = [] variables = [] prototypes= []", "+= \"%s \" % self.inout s += str(self.type) + \" \" if self.name:", "\");\" return s class Function(object): def __init__(self, type, name, parameters, code): self.type =", "continue do for while\" \"if else\" \"in out inout\" \"float int void bool", "invariant\" \"discard return\" \"mat2 mat3 mat4\" \"vec2 vec3 vec4 ivec2 ivec3 ivec4 bvec2", "GLSL source code into an abstract syntax list \"\"\" constants = [] structs", "\", \" s += \") \" s += self.code return s class Constant(object):", "code): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.parameters = parameters", "def __str__(self): s = \"\" if self.storage: s += \"%s \" % self.storage", "STRUCT = ( Literal(\"struct\").suppress() + IDENTIFIER(\"type\") + nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES) + SEMICOLON)", "s def __eq__(self, other): return self.value == other.value class 
Struct(object): def __init__(self, name,", "storage = token.storage, precision = token.precision, size = token.size) if token.code: F =", "parameters def __str__(self): s = str(self.type) + \" %s (\" % self.alias for", "IDENTIFIER(\"name\") + restOfLine(\"value\")) class Type(object): def __init__(self, base=None, storage=None, precision=None, size=None): if isinstance(base,", "precision.strip() def __str__(self): s = \"\" if self.storage: s += \"%s \" %", "Regex(\"(in|out|inout)[ \\t\\n]\") # Variable declarations # --------------------- PART = nestedExpr() | nestedExpr('{','}') |", "break continue do for while\" \"if else\" \"in out inout\" \"float int void", "s += \"[%s]\" % self.size return s class Variable(object): def __init__(self, type, name,", "= Type(type) self.name = name.strip() self.alias = name.strip() self.parameters = parameters def __str__(self):", "[] # Constants for (token, start, end) in CONSTANT.scanString(code): C = Constant(name =", "parameters: parameter.function = F else: P = Prototype(type = T, name = token.name,", "INTEGER | FLOAT | OPERATOR EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE = (IDENTIFIER(\"name\") +", "Variables for (token, start, end) in DECLARATION.scanString(code): for variable in token.variables: size =", "source code into an abstract syntax list \"\"\" constants = [] structs =", "s def __eq__(self, other): return (self.base == other.base and self.size == other.size and", "__init__(self, name, value): self.name = name.strip() self.alias = name.strip() self.value = value.strip() def", "^ ! 
{ }\") STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER = Literal(\"const\") INVARIANT_QUALIFIER = Literal(\"invariant\")", "self.alias = name.strip() self.parameters = parameters def __str__(self): s = str(self.type) + \"", "-*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2015, <NAME> # Distributed", "ivec2 ivec3 ivec4 bvec2 bvec3 bvec4 sampler2D samplerCube\" \"struct\") reserved = (\"asm\" \"class", "DECLARATION.ignore(cStyleComment) # Function parameter # ------------------ PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\")", "# --------------------------------- STRUCT = ( Literal(\"struct\").suppress() + IDENTIFIER(\"type\") + nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES)", "= name.strip() self.alias = name.strip() self.parameters = parameters def __str__(self): s = str(self.type)", "2015, <NAME> # Distributed under the (new) BSD License. 
See LICENSE.txt for more", "INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL = Regex('(0[0-7]*)') INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)') INTEGER = INT_HEXADECIMAL |", "CONST_QUALIFIER = Literal(\"const\") INVARIANT_QUALIFIER = Literal(\"invariant\") PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[ \\t\\n]\")", "parameter in token.parameters: size = '' if not parameter.size else parameter.size[0] P =", "+ Optional(VARIABLES) + SEMICOLON) STRUCT.ignore(cStyleComment) # Constants # --------- CONSTANT = (Literal(\"#\").suppress() +", "start, end) in FUNCTION.scanString(code): parameters = [] for parameter in token.parameters: size =", "s = str(self.type) + \" \" + self.alias if self.type.size: s += \"[%s]\"", "= parameter.storage, precision = parameter.precision, size = parameter.size), name = parameter.name, inout =", "value): self.name = name.strip() self.alias = name.strip() self.value = value.strip() def __str__(self): s", "[] variables = [] prototypes= [] functions = [] # Constants for (token,", "PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\")) + Optional(LBRACK", "BSD License. See LICENSE.txt for more info. 
# ----------------------------------------------------------------------------- from pyparsing import *", "= Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER = Literal(\"const\") INVARIANT_QUALIFIER = Literal(\"invariant\") PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER =", "prototypes.append(P) for parameter in parameters: parameter.function = None return constants, structs, variables, prototypes,", "parameters, code = token.code[0]) functions.append(F) for parameter in parameters: parameter.function = F else:", "import * keywords = (\"attribute const uniform varying break continue do for while\"", "\" s += \") \" s += self.code return s class Constant(object): def", "__str__(self): s = \"struct %s %s;\" % (self.name, self.content) return s def parse(code):", "value = '' if not variable.value else variable.value[0] V = Variable(Type(base = token.type,", "Literal(\"invariant\") PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[ \\t\\n]\") # Variable declarations # ---------------------", "(token, start, end) in DECLARATION.scanString(code): for variable in token.variables: size = '' if", "Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\")) + Optional(LBRACK + SIZE", "INT_OCTAL | INT_DECIMAL FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN = Literal(\"(\").suppress(), Literal(\")\").suppress() LBRACK, RBRACK", "union enum typedef template this packed\" \"goto switch default\" \"inline noinline volatile public", "+ SIZE + RBRACK)(\"size\")) # Function prototypes # ------------------- FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\") +", "not variable.value else variable.value[0] V = Variable(Type(base = token.type, size = size), name", "+ IDENTIFIER(\"type\") + nestedExpr(\"{\", 
\"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES) + SEMICOLON) STRUCT.ignore(cStyleComment) # Constants #", "other.size and self.precision == other.precision) class Parameter(object): def __init__(self, type, name=None, inout=\"in\"): self.type", "class Constant(object): def __init__(self, name, value): self.name = name.strip() self.alias = name.strip() self.value", "if not variable.value else variable.value[0] V = Variable(Type(base = token.type, size = size),", "extern external\" \"interface flat long short double half fixed unsigned superp\" \"input output\"", "class Parameter(object): def __init__(self, type, name=None, inout=\"in\"): self.type = Type(type) self.name = name.strip()", "return s class Variable(object): def __init__(self, type, name, value=None): self.type = Type(type) self.name", "constants.append(C) # Variables for (token, start, end) in DECLARATION.scanString(code): for variable in token.variables:", "__eq__(self, other): return self.value == other.value class Struct(object): def __init__(self, name, content): self.name", "declarations # --------------------------------- STRUCT = ( Literal(\"struct\").suppress() + IDENTIFIER(\"type\") + nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\") +", "unsigned superp\" \"input output\" \"hvec2 hvec3 hvec4 dvec2 dvec3 dvec4 fvec2 fvec3 fvec4", "s class Function(object): def __init__(self, type, name, parameters, code): self.type = Type(type) self.name", "def __init__(self, type, name, parameters, code): self.type = Type(type) self.name = name.strip() self.alias", "= '' if not variable.size else variable.size[0] value = '' if not variable.value", "restOfLine(\"value\")) class Type(object): def __init__(self, base=None, storage=None, precision=None, size=None): if isinstance(base, Type): other", "+ Optional(EQUAL + EXPR)(\"value\")) VARIABLES = delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION = (STORAGE_QUALIFIER(\"storage\") 
+ Optional(PRECISION_QUALIFIER)(\"precision\") +", "+= str(self.type) + \" \" if self.name: s += \"%s\" % self.name if", "'' if not variable.value else variable.value[0] V = Variable(Type(base = token.type, storage =", "name, parameters, code): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.parameters", "in token.variables: size = '' if not variable.size else variable.size[0] value = ''", "code.strip() def __str__(self): s = str(self.type) + \" %s (\" % self.alias for", "other): return (self.base == other.base and self.size == other.size and self.precision == other.precision)", "if self.type.size: s += \"[%s]\" % self.type.size if self.value: s += \" =", "name = token.name, parameters = parameters) prototypes.append(P) for parameter in parameters: parameter.function =", "--------- CONSTANT = (Literal(\"#\").suppress() + Literal(\"define\").suppress() + IDENTIFIER(\"name\") + restOfLine(\"value\")) class Type(object): def", "(\" % self.alias for i, parameter in enumerate(self.parameters): s += str(parameter) if i", "storage=None, precision=None, size=None): if isinstance(base, Type): other = base self.base = other.base self.size", "+ \" %s (\" % self.alias for i, parameter in enumerate(self.parameters): s +=", "def __eq__(self, other): return self.value == other.value class Struct(object): def __init__(self, name, content):", "s += \" = %s\" % self.value s += \";\" return s class", "i, parameter in enumerate(self.parameters): s += str(parameter) if i < len(self.parameters)-1: s+= \",", "V = Variable(Type(base = token.type, storage = token.storage, precision = token.precision, size =", "name = variable.name, value = value) variables.append(V) # Functions prototype and definitions for", "s += str(parameter) if i < len(self.parameters)-1: s+= \", \" s += \");\"", "Variable(object): def __init__(self, type, name, value=None): self.type = Type(type) self.name = name.strip() self.alias", "LPAREN, RPAREN = Literal(\"(\").suppress(), 
Literal(\")\").suppress() LBRACK, RBRACK = Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE, RBRACE =", "def __init__(self, name, content): self.name = name.strip() self.content = content.strip() def __str__(self): s", "== other.value class Struct(object): def __init__(self, name, content): self.name = name.strip() self.content =", "in enumerate(self.parameters): s += str(parameter) if i < len(self.parameters)-1: s+= \", \" s", "INT_HEXADECIMAL | INT_OCTAL | INT_DECIMAL FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN = Literal(\"(\").suppress(), Literal(\")\").suppress()", "Literal(\"struct\").suppress() + IDENTIFIER(\"type\") + nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES) + SEMICOLON) STRUCT.ignore(cStyleComment) # Constants", "__init__(self, name, content): self.name = name.strip() self.content = content.strip() def __str__(self): s =", "= token.size) if token.code: F = Function( type = T, name = token.name,", "mat3 mat4\" \"vec2 vec3 vec4 ivec2 ivec3 ivec4 bvec2 bvec3 bvec4 sampler2D samplerCube\"", "= \"struct %s %s;\" % (self.name, self.content) return s def parse(code): \"\"\" Parse", "variable.name, value = value) variables.append(V) # Functions prototype and definitions for (token, start,", "DECLARATION.scanString(code): for variable in token.variables: size = '' if not variable.size else variable.size[0]", "= Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN, RPAREN = Literal(\"(\").suppress(), Literal(\")\").suppress() LBRACK, RBRACK = Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE,", "----------------------------------------------------------------------------- from pyparsing import * keywords = (\"attribute const uniform varying break continue", "+ Optional(LBRACK + SIZE + RBRACK)(\"size\")) # Function prototypes # ------------------- FUNCTION =", "\"struct\") reserved = (\"asm\" \"class 
union enum typedef template this packed\" \"goto switch", "Parameter(object): def __init__(self, type, name=None, inout=\"in\"): self.type = Type(type) self.name = name.strip() self.alias", "parameter in enumerate(self.parameters): s += str(parameter) if i < len(self.parameters)-1: s+= \", \"", "+ SEMICOLON) STRUCT.ignore(cStyleComment) # Constants # --------- CONSTANT = (Literal(\"#\").suppress() + Literal(\"define\").suppress() +", "= Variable(Type(base = token.type, size = size), name = variable.name, value = value)", "\"struct %s %s;\" % (self.name, self.content) return s def parse(code): \"\"\" Parse a", "= value) variables.append(V) # Functions prototype and definitions for (token, start, end) in", "s += \";\" return s class Prototype(object): def __init__(self, type, name, parameters): self.type", "token.type, storage = token.storage, precision = token.precision, size = token.size) if token.code: F", "(token, start, end) in CONSTANT.scanString(code): C = Constant(name = token.name, value = token.value)", "for (token, start, end) in DECLARATION.scanString(code): for variable in token.variables: size = ''", "half fixed unsigned superp\" \"input output\" \"hvec2 hvec3 hvec4 dvec2 dvec3 dvec4 fvec2", "INT_OCTAL = Regex('(0[0-7]*)') INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)') INTEGER = INT_HEXADECIMAL | INT_OCTAL | INT_DECIMAL", "IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\")) + Optional(LBRACK + SIZE + RBRACK)(\"size\")) # Function prototypes #", "EXPR)(\"value\")) VARIABLES = delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION = (STORAGE_QUALIFIER(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + VARIABLES", "FUNCTION.ignore(cStyleComment) # Struct definitions & declarations # --------------------------------- STRUCT = ( Literal(\"struct\").suppress() +", "Variable(Type(base = token.type, storage = token.storage, precision = token.precision, size = size), name", "C = Constant(name = 
token.name, value = token.value) constants.append(C) # Variables for (token,", "\"\"\" Parse a GLSL source code into an abstract syntax list \"\"\" constants", "for more info. # ----------------------------------------------------------------------------- from pyparsing import * keywords = (\"attribute const", "token.code[0]) functions.append(F) for parameter in parameters: parameter.function = F else: P = Prototype(type", "= size), name = variable.name, value = value) variables.append(V) # Functions prototype and", "\", \" s += \");\" return s class Function(object): def __init__(self, type, name,", "bvec3 bvec4 sampler2D samplerCube\" \"struct\") reserved = (\"asm\" \"class union enum typedef template", "storage.strip() self.precision = precision.strip() def __str__(self): s = \"\" if self.storage: s +=", "def __eq__(self, other): return (self.base == other.base and self.size == other.size and self.precision", "+= str(parameter) if i < len(self.parameters)-1: s+= \", \" s += \") \"", "% self.storage if self.precision: s += \"%s \" % self.precision s += \"%s\"", "= Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON, COMMA = Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL = Literal(\"=\").suppress() SIZE =", "# ----------------------------------------------------------------------------- # Copyright (c) 2015, <NAME> # Distributed under the (new) BSD", "Literal(\"(\").suppress(), Literal(\")\").suppress() LBRACK, RBRACK = Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE, RBRACE = Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON,", "a GLSL source code into an abstract syntax list \"\"\" constants = []", "= Variable(Type(base = token.type, storage = token.storage, precision = token.precision, size = size),", "self.alias if self.type.size: s += \"[%s]\" % self.type.size if self.value: s += \"", "# Constants for (token, start, end) in CONSTANT.scanString(code): C = Constant(name = token.name,", "true false\" \"lowp mediump 
highp precision invariant\" \"discard return\" \"mat2 mat3 mat4\" \"vec2", "\"goto switch default\" \"inline noinline volatile public static extern external\" \"interface flat long", "+ RPAREN + ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON)) FUNCTION.ignore(cStyleComment) # Struct definitions & declarations", "\"namespace using\") IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL = Regex('(0[0-7]*)') INT_HEXADECIMAL =", "self.inout = inout.strip() def __str__(self): s = \"\" if self.inout: s += \"%s", "sampler3DRect sampler2DRectShadow\" \"sizeof cast\" \"namespace using\") IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL", "= value.strip() def __str__(self): s = \"#define %s %s\" % (self.alias, self.value) return", "Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN + ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON)) FUNCTION.ignore(cStyleComment) # Struct definitions &", "\" % self.inout s += str(self.type) + \" \" if self.name: s +=", "= token.content[0]) structs.append(S) for variable in token.variables: size = '' if not variable.size", "flat long short double half fixed unsigned superp\" \"input output\" \"hvec2 hvec3 hvec4", "= Literal(\"invariant\") PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[ \\t\\n]\") # Variable declarations #", "= size.strip() self.storage = storage.strip() self.precision = precision.strip() def __str__(self): s = \"\"", "i < len(self.parameters)-1: s+= \", \" s += \") \" s += self.code", "Literal(\"}\").suppress() SEMICOLON, COMMA = Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL = Literal(\"=\").suppress() SIZE = INTEGER |", "other.value class Struct(object): def __init__(self, name, content): self.name = name.strip() self.content = content.strip()", "IDENTIFIER(\"name\") + LPAREN + 
Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN + ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON)) FUNCTION.ignore(cStyleComment)", "variables.append(V) # Functions prototype and definitions for (token, start, end) in FUNCTION.scanString(code): parameters", "Struct definitions & declarations for (token, start, end) in STRUCT.scanString(code): S = Struct(name", "other.base and self.size == other.size and self.precision == other.precision) class Parameter(object): def __init__(self,", "'' if not variable.size else variable.size[0] value = '' if not variable.value else", "| INTEGER | FLOAT | OPERATOR EXPR = delimitedList(PART, delim=Empty()).setParseAction(keepOriginalText) VARIABLE = (IDENTIFIER(\"name\")", "< len(self.parameters)-1: s+= \", \" s += \");\" return s class Function(object): def", "(self.base == other.base and self.size == other.size and self.precision == other.precision) class Parameter(object):", "s += \"[%s]\" % self.type.size if self.value: s += \" = %s\" %", "self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.value = value.strip() def", "sampler3D\" \"sampler1DShadow sampler2DShadow\" \"sampler2DRect sampler3DRect sampler2DRectShadow\" \"sizeof cast\" \"namespace using\") IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*')", "size), name = variable.name, value = value) variables.append(V) # Functions prototype and definitions", "size = '' if not parameter.size else parameter.size[0] P = Parameter(type = Type(base", "token.value) constants.append(C) # Variables for (token, start, end) in DECLARATION.scanString(code): for variable in", "other.precision else: self.base = base.strip() self.size = size.strip() self.storage = storage.strip() self.precision =", "(new) BSD License. See LICENSE.txt for more info. # ----------------------------------------------------------------------------- from pyparsing import", "[ ] . & ^ ! 
{ }\") STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER =", "= other.size self.storage = other.storage self.precision = other.precision else: self.base = base.strip() self.size", "# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2015, <NAME> #", "% self.alias for i, parameter in enumerate(self.parameters): s += str(parameter) if i <", "& declarations # --------------------------------- STRUCT = ( Literal(\"struct\").suppress() + IDENTIFIER(\"type\") + nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\")", "bool true false\" \"lowp mediump highp precision invariant\" \"discard return\" \"mat2 mat3 mat4\"", "parameters self.code = code.strip() def __str__(self): s = str(self.type) + \" %s (\"", "precision = parameter.precision, size = parameter.size), name = parameter.name, inout = parameter.inout) parameters.append(P)", "parameters = parameters) prototypes.append(P) for parameter in parameters: parameter.function = None return constants,", "superp\" \"input output\" \"hvec2 hvec3 hvec4 dvec2 dvec3 dvec4 fvec2 fvec3 fvec4 sampler1D", "self.name: s += \"%s\" % self.name if self.type.size: s += \"[%s]\" % self.size", "= size), name = variable.name, value = value) variables.append(V) # Struct definitions &", "Variable declarations # --------------------- PART = nestedExpr() | nestedExpr('{','}') | IDENTIFIER | INTEGER", "oneOf(\"+ - * / [ ] . & ^ ! 
{ }\") STORAGE_QUALIFIER", "int void bool true false\" \"lowp mediump highp precision invariant\" \"discard return\" \"mat2", "= name.strip() self.value = value.strip() def __str__(self): s = \"#define %s %s\" %", "SIZE + RBRACK)(\"size\") + Optional(EQUAL + EXPR)(\"value\")) VARIABLES = delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION = (STORAGE_QUALIFIER(\"storage\")", "start, end) in STRUCT.scanString(code): S = Struct(name = token.type, content = token.content[0]) structs.append(S)", "token.storage, precision = token.precision, size = token.size) if token.code: F = Function( type", "Literal(\"=\").suppress() SIZE = INTEGER | IDENTIFIER OPERATOR = oneOf(\"+ - * / [", "def __str__(self): s = str(self.type) + \" \" + self.alias if self.type.size: s", "COMMA = Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL = Literal(\"=\").suppress() SIZE = INTEGER | IDENTIFIER OPERATOR", "info. # ----------------------------------------------------------------------------- from pyparsing import * keywords = (\"attribute const uniform varying", "\"[%s]\" % self.type.size if self.value: s += \" = %s\" % self.value s", "for while\" \"if else\" \"in out inout\" \"float int void bool true false\"", "+= \"%s\" % self.base return s def __eq__(self, other): return (self.base == other.base", "[] prototypes= [] functions = [] # Constants for (token, start, end) in", "Literal(\"const\") INVARIANT_QUALIFIER = Literal(\"invariant\") PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[ \\t\\n]\") # Variable", "token.size) if token.code: F = Function( type = T, name = token.name, parameters", "str(parameter) if i < len(self.parameters)-1: s+= \", \" s += \") \" s", "+= self.code return s class Constant(object): def __init__(self, name, value): self.name = name.strip()", "value=None): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.value = value.strip()", "str(self.type) + 
\" \" + self.alias if self.type.size: s += \"[%s]\" % self.type.size", "* keywords = (\"attribute const uniform varying break continue do for while\" \"if", "= variable.name, value = value) variables.append(V) # Functions prototype and definitions for (token,", "name.strip() self.alias = name.strip() self.value = value.strip() def __str__(self): s = \"#define %s", "value) variables.append(V) # Struct definitions & declarations for (token, start, end) in STRUCT.scanString(code):", "template this packed\" \"goto switch default\" \"inline noinline volatile public static extern external\"", "Function(object): def __init__(self, type, name, parameters, code): self.type = Type(type) self.name = name.strip()", "class Struct(object): def __init__(self, name, content): self.name = name.strip() self.content = content.strip() def", "packed\" \"goto switch default\" \"inline noinline volatile public static extern external\" \"interface flat", "\"in out inout\" \"float int void bool true false\" \"lowp mediump highp precision", "self.size return s class Variable(object): def __init__(self, type, name, value=None): self.type = Type(type)", "this packed\" \"goto switch default\" \"inline noinline volatile public static extern external\" \"interface", "\"%s\" % self.name if self.type.size: s += \"[%s]\" % self.size return s class", "= Type(base = parameter.type, storage = parameter.storage, precision = parameter.precision, size = parameter.size),", "token.precision, size = token.size) if token.code: F = Function( type = T, name", "--------------------------------- STRUCT = ( Literal(\"struct\").suppress() + IDENTIFIER(\"type\") + nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES) +", "\"vec2 vec3 vec4 ivec2 ivec3 ivec4 bvec2 bvec3 bvec4 sampler2D samplerCube\" \"struct\") reserved", "* / [ ] . & ^ ! 
{ }\") STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\")", "+ ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON)) FUNCTION.ignore(cStyleComment) # Struct definitions & declarations # ---------------------------------", "= (Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + IDENTIFIER(\"name\") + LPAREN + Optional(delimitedList(PARAMETER))(\"parameters\") +", "= name.strip() self.alias = name.strip() self.parameters = parameters self.code = code.strip() def __str__(self):", "SIZE + RBRACK)(\"size\")) # Function prototypes # ------------------- FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\")", "<gh_stars>1-10 # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2015, <NAME>", "self.precision = precision.strip() def __str__(self): s = \"\" if self.storage: s += \"%s", "parameter.name, inout = parameter.inout) parameters.append(P) T = Type(base = token.type, storage = token.storage,", "(token, start, end) in FUNCTION.scanString(code): parameters = [] for parameter in token.parameters: size", "Function parameter # ------------------ PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\")", "self.storage if self.precision: s += \"%s \" % self.precision s += \"%s\" %", "end) in CONSTANT.scanString(code): C = Constant(name = token.name, value = token.value) constants.append(C) #", "\") \" s += self.code return s class Constant(object): def __init__(self, name, value):", "\"lowp mediump highp precision invariant\" \"discard return\" \"mat2 mat3 mat4\" \"vec2 vec3 vec4", "isinstance(base, Type): other = base self.base = other.base self.size = other.size self.storage =", "= Regex('(0[0-7]*)') INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)') 
INTEGER = INT_HEXADECIMAL | INT_OCTAL | INT_DECIMAL FLOAT", "return s class Prototype(object): def __init__(self, type, name, parameters): self.type = Type(type) self.name", "Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL = Regex('(0[0-7]*)') INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)') INTEGER = INT_HEXADECIMAL", "= parameter.name, inout = parameter.inout) parameters.append(P) T = Type(base = token.type, storage =", "in token.parameters: size = '' if not parameter.size else parameter.size[0] P = Parameter(type", "public static extern external\" \"interface flat long short double half fixed unsigned superp\"", "__init__(self, type, name, parameters, code): self.type = Type(type) self.name = name.strip() self.alias =", "variable.value else variable.value[0] V = Variable(Type(base = token.type, storage = token.storage, precision =", "SEMICOLON) STRUCT.ignore(cStyleComment) # Constants # --------- CONSTANT = (Literal(\"#\").suppress() + Literal(\"define\").suppress() + IDENTIFIER(\"name\")", "name, content): self.name = name.strip() self.content = content.strip() def __str__(self): s = \"struct", "str(self.type) + \" \" if self.name: s += \"%s\" % self.name if self.type.size:", "# ----------------------------------------------------------------------------- from pyparsing import * keywords = (\"attribute const uniform varying break", "long short double half fixed unsigned superp\" \"input output\" \"hvec2 hvec3 hvec4 dvec2", "= \"#define %s %s\" % (self.alias, self.value) return s def __eq__(self, other): return", "IDENTIFIER OPERATOR = oneOf(\"+ - * / [ ] . 
& ^ !", "self.alias = name.strip() self.parameters = parameters self.code = code.strip() def __str__(self): s =", "= \"\" if self.inout: s += \"%s \" % self.inout s += str(self.type)", "dvec3 dvec4 fvec2 fvec3 fvec4 sampler1D sampler3D\" \"sampler1DShadow sampler2DShadow\" \"sampler2DRect sampler3DRect sampler2DRectShadow\" \"sizeof", "self.value: s += \" = %s\" % self.value s += \";\" return s", "len(self.parameters)-1: s+= \", \" s += \");\" return s class Function(object): def __init__(self,", "+ Optional(IDENTIFIER(\"name\")) + Optional(LBRACK + SIZE + RBRACK)(\"size\")) # Function prototypes # -------------------", "parameter.storage, precision = parameter.precision, size = parameter.size), name = parameter.name, inout = parameter.inout)", "= '' if not parameter.size else parameter.size[0] P = Parameter(type = Type(base =", "Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + VARIABLES + SEMICOLON) DECLARATION.ignore(cStyleComment) # Function parameter # ------------------", "Optional(VARIABLES) + SEMICOLON) STRUCT.ignore(cStyleComment) # Constants # --------- CONSTANT = (Literal(\"#\").suppress() + Literal(\"define\").suppress()", "= Struct(name = token.type, content = token.content[0]) structs.append(S) for variable in token.variables: size", "= Literal(\"(\").suppress(), Literal(\")\").suppress() LBRACK, RBRACK = Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE, RBRACE = Literal(\"{\").suppress(), Literal(\"}\").suppress()", "definitions for (token, start, end) in FUNCTION.scanString(code): parameters = [] for parameter in", "RBRACE = Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON, COMMA = Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL = Literal(\"=\").suppress() SIZE", "self.type.size if self.value: s += \" = %s\" % self.value s += \";\"", "+ SIZE + RBRACK)(\"size\") + Optional(EQUAL + EXPR)(\"value\")) VARIABLES = delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION 
=", "name, parameters): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.parameters =", "= Constant(name = token.name, value = token.value) constants.append(C) # Variables for (token, start,", "SIZE = INTEGER | IDENTIFIER OPERATOR = oneOf(\"+ - * / [ ]", "if not parameter.size else parameter.size[0] P = Parameter(type = Type(base = parameter.type, storage", "code = token.code[0]) functions.append(F) for parameter in parameters: parameter.function = F else: P", "CONSTANT.scanString(code): C = Constant(name = token.name, value = token.value) constants.append(C) # Variables for", "# ------------------ PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\"))", "Type(object): def __init__(self, base=None, storage=None, precision=None, size=None): if isinstance(base, Type): other = base", "RBRACK)(\"size\")) # Function prototypes # ------------------- FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\")", "name.strip() self.alias = name.strip() self.inout = inout.strip() def __str__(self): s = \"\" if", "# Struct definitions & declarations # --------------------------------- STRUCT = ( Literal(\"struct\").suppress() + IDENTIFIER(\"type\")", "LBRACE, RBRACE = Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON, COMMA = Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL = Literal(\"=\").suppress()", "other.precision) class Parameter(object): def __init__(self, type, name=None, inout=\"in\"): self.type = Type(type) self.name =", "= [] # Constants for (token, start, end) in CONSTANT.scanString(code): C = Constant(name", "Type(type) self.name = name.strip() self.alias = name.strip() self.inout = inout.strip() def __str__(self): s", "\"mat2 mat3 mat4\" \"vec2 vec3 vec4 ivec2 ivec3 ivec4 bvec2 bvec3 bvec4 
sampler2D", "= delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION = (STORAGE_QUALIFIER(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + VARIABLES + SEMICOLON)", "sampler2DRectShadow\" \"sizeof cast\" \"namespace using\") IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL =", "s += \"%s \" % self.storage if self.precision: s += \"%s \" %", "\";\" return s class Prototype(object): def __init__(self, type, name, parameters): self.type = Type(type)", "= storage.strip() self.precision = precision.strip() def __str__(self): s = \"\" if self.storage: s", "s += \"%s\" % self.name if self.type.size: s += \"[%s]\" % self.size return", "STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER = Literal(\"const\") INVARIANT_QUALIFIER = Literal(\"invariant\") PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER", "License. See LICENSE.txt for more info. 
# ----------------------------------------------------------------------------- from pyparsing import * keywords", "V = Variable(Type(base = token.type, size = size), name = variable.name, value =", "\"%s \" % self.precision s += \"%s\" % self.base return s def __eq__(self,", "VARIABLE = (IDENTIFIER(\"name\") + Optional(LBRACK + SIZE + RBRACK)(\"size\") + Optional(EQUAL + EXPR)(\"value\"))", "'' if not parameter.size else parameter.size[0] P = Parameter(type = Type(base = parameter.type,", "Parse a GLSL source code into an abstract syntax list \"\"\" constants =", "\" \" + self.alias if self.type.size: s += \"[%s]\" % self.type.size if self.value:", "RBRACK)(\"size\") + Optional(EQUAL + EXPR)(\"value\")) VARIABLES = delimitedList(VARIABLE.setResultsName(\"variables\",listAllMatches=True)) DECLARATION = (STORAGE_QUALIFIER(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\")", "enumerate(self.parameters): s += str(parameter) if i < len(self.parameters)-1: s+= \", \" s +=", "= name.strip() self.alias = name.strip() self.inout = inout.strip() def __str__(self): s = \"\"", "functions.append(F) for parameter in parameters: parameter.function = F else: P = Prototype(type =", "coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) 2015, <NAME> # Distributed under", "%s %s;\" % (self.name, self.content) return s def parse(code): \"\"\" Parse a GLSL", "= Regex('(0[xX][0-9a-fA-F]*)') INTEGER = INT_HEXADECIMAL | INT_OCTAL | INT_DECIMAL FLOAT = Regex('[+-]?(((\\d+\\.\\d*)|(\\d*\\.\\d+))([eE][-+]?\\d+)?)|(\\d*[eE][+-]?\\d+)') LPAREN,", "# Function parameter # ------------------ PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\") +", "self.name = name.strip() self.alias = name.strip() self.inout = inout.strip() def __str__(self): s =", "storage = parameter.storage, precision = parameter.precision, size = 
parameter.size), name = parameter.name, inout", "while\" \"if else\" \"in out inout\" \"float int void bool true false\" \"lowp", "fvec2 fvec3 fvec4 sampler1D sampler3D\" \"sampler1DShadow sampler2DShadow\" \"sampler2DRect sampler3DRect sampler2DRectShadow\" \"sizeof cast\" \"namespace", "= parameter.size), name = parameter.name, inout = parameter.inout) parameters.append(P) T = Type(base =", "= value.strip() def __str__(self): s = str(self.type) + \" \" + self.alias if", "parameter in parameters: parameter.function = F else: P = Prototype(type = T, name", "+ Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + VARIABLES + SEMICOLON) DECLARATION.ignore(cStyleComment) # Function parameter #", "= '' if not variable.value else variable.value[0] V = Variable(Type(base = token.type, size", "= token.value) constants.append(C) # Variables for (token, start, end) in DECLARATION.scanString(code): for variable", "size.strip() self.storage = storage.strip() self.precision = precision.strip() def __str__(self): s = \"\" if", "value) variables.append(V) # Functions prototype and definitions for (token, start, end) in FUNCTION.scanString(code):", "= token.name, parameters = parameters, code = token.code[0]) functions.append(F) for parameter in parameters:", "__init__(self, type, name, value=None): self.type = Type(type) self.name = name.strip() self.alias = name.strip()", "self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.inout = inout.strip() def", "\"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES) + SEMICOLON) STRUCT.ignore(cStyleComment) # Constants # --------- CONSTANT = (Literal(\"#\").suppress()", "for parameter in parameters: parameter.function = None return constants, structs, variables, prototypes, functions", "+ LPAREN + Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN + ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON)) FUNCTION.ignore(cStyleComment) #", 
"= str(self.type) + \" \" + self.alias if self.type.size: s += \"[%s]\" %", "\"if else\" \"in out inout\" \"float int void bool true false\" \"lowp mediump", "SEMICOLON) DECLARATION.ignore(cStyleComment) # Function parameter # ------------------ PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") +", "s += str(self.type) + \" \" if self.name: s += \"%s\" % self.name", "/ [ ] . & ^ ! { }\") STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER", "Parameter(type = Type(base = parameter.type, storage = parameter.storage, precision = parameter.precision, size =", "Type(base = parameter.type, storage = parameter.storage, precision = parameter.precision, size = parameter.size), name", "= variable.name, value = value) variables.append(V) # Struct definitions & declarations for (token,", "+ self.alias if self.type.size: s += \"[%s]\" % self.type.size if self.value: s +=", "% self.precision s += \"%s\" % self.base return s def __eq__(self, other): return", "Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL = Regex('(0[0-7]*)') INT_HEXADECIMAL = Regex('(0[xX][0-9a-fA-F]*)') INTEGER = INT_HEXADECIMAL | INT_OCTAL |", "name=None, inout=\"in\"): self.type = Type(type) self.name = name.strip() self.alias = name.strip() self.inout =", "def parse(code): \"\"\" Parse a GLSL source code into an abstract syntax list", "\"sizeof cast\" \"namespace using\") IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL = Regex('([+-]?(([1-9][0-9]*)|0+))') INT_OCTAL = Regex('(0[0-7]*)')", "return s def parse(code): \"\"\" Parse a GLSL source code into an abstract", "Literal(\"]\").suppress() LBRACE, RBRACE = Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON, COMMA = Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL =", "value = token.value) constants.append(C) # Variables for (token, start, end) in DECLARATION.scanString(code): for", "! 
{ }\") STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER = Literal(\"const\") INVARIANT_QUALIFIER = Literal(\"invariant\") PRECISION_QUALIFIER", "sampler2DShadow\" \"sampler2DRect sampler3DRect sampler2DRectShadow\" \"sizeof cast\" \"namespace using\") IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL =", "PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\") PARAMETER_QUALIFIER = Regex(\"(in|out|inout)[ \\t\\n]\") # Variable declarations # --------------------- PART", "size), name = variable.name, value = value) variables.append(V) # Struct definitions & declarations", "Type(base = token.type, storage = token.storage, precision = token.precision, size = token.size) if", "= parameters, code = token.code[0]) functions.append(F) for parameter in parameters: parameter.function = F", "token.type, content = token.content[0]) structs.append(S) for variable in token.variables: size = '' if", ". & ^ ! { }\") STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER = Literal(\"const\") INVARIANT_QUALIFIER", "other.size self.storage = other.storage self.precision = other.precision else: self.base = base.strip() self.size =", "precision invariant\" \"discard return\" \"mat2 mat3 mat4\" \"vec2 vec3 vec4 ivec2 ivec3 ivec4", "self.content = content.strip() def __str__(self): s = \"struct %s %s;\" % (self.name, self.content)", "token.name, parameters = parameters) prototypes.append(P) for parameter in parameters: parameter.function = None return", "(IDENTIFIER(\"name\") + Optional(LBRACK + SIZE + RBRACK)(\"size\") + Optional(EQUAL + EXPR)(\"value\")) VARIABLES =", "See LICENSE.txt for more info. 
# ----------------------------------------------------------------------------- from pyparsing import * keywords =", "token.precision, size = size), name = variable.name, value = value) variables.append(V) # Struct", "= Literal(\"=\").suppress() SIZE = INTEGER | IDENTIFIER OPERATOR = oneOf(\"+ - * /", "variable.value else variable.value[0] V = Variable(Type(base = token.type, size = size), name =", "other = base self.base = other.base self.size = other.size self.storage = other.storage self.precision", "self.size == other.size and self.precision == other.precision) class Parameter(object): def __init__(self, type, name=None,", "\"sampler1DShadow sampler2DShadow\" \"sampler2DRect sampler3DRect sampler2DRectShadow\" \"sizeof cast\" \"namespace using\") IDENTIFIER = Regex('[a-zA-Z_][a-zA-Z_0-9]*') INT_DECIMAL", "self.type.size: s += \"[%s]\" % self.type.size if self.value: s += \" = %s\"", "+ VARIABLES + SEMICOLON) DECLARATION.ignore(cStyleComment) # Function parameter # ------------------ PARAMETER = Group(Optional(STORAGE_QUALIFIER)(\"storage\")", "((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) | SEMICOLON)) FUNCTION.ignore(cStyleComment) # Struct definitions & declarations # --------------------------------- STRUCT", "for i, parameter in enumerate(self.parameters): s += str(parameter) if i < len(self.parameters)-1: s+=", "Optional(PRECISION_QUALIFIER)(\"precision\") + Optional(PARAMETER_QUALIFIER)(\"inout\") + IDENTIFIER(\"type\") + Optional(IDENTIFIER(\"name\")) + Optional(LBRACK + SIZE + RBRACK)(\"size\"))", "self.base return s def __eq__(self, other): return (self.base == other.base and self.size ==", "inout.strip() def __str__(self): s = \"\" if self.inout: s += \"%s \" %", "F = Function( type = T, name = token.name, parameters = parameters, code", "else\" \"in out inout\" \"float int void bool true false\" \"lowp mediump highp", "end) in STRUCT.scanString(code): S = Struct(name = token.type, content = token.content[0]) structs.append(S) 
for", "ivec3 ivec4 bvec2 bvec3 bvec4 sampler2D samplerCube\" \"struct\") reserved = (\"asm\" \"class union", "sampler2D samplerCube\" \"struct\") reserved = (\"asm\" \"class union enum typedef template this packed\"", "= parameters self.code = code.strip() def __str__(self): s = str(self.type) + \" %s", "in parameters: parameter.function = F else: P = Prototype(type = T, name =", "default\" \"inline noinline volatile public static extern external\" \"interface flat long short double", "for (token, start, end) in FUNCTION.scanString(code): parameters = [] for parameter in token.parameters:", "s = \"#define %s %s\" % (self.alias, self.value) return s def __eq__(self, other):", "= token.type, content = token.content[0]) structs.append(S) for variable in token.variables: size = ''", "value = value) variables.append(V) # Functions prototype and definitions for (token, start, end)", "return self.value == other.value class Struct(object): def __init__(self, name, content): self.name = name.strip()", "}\") STORAGE_QUALIFIER = Regex(\"const|varying|uniform|attribute\") CONST_QUALIFIER = Literal(\"const\") INVARIANT_QUALIFIER = Literal(\"invariant\") PRECISION_QUALIFIER = Regex(\"lowp|mediump|highp\")", "Literal(\"[\").suppress(), Literal(\"]\").suppress() LBRACE, RBRACE = Literal(\"{\").suppress(), Literal(\"}\").suppress() SEMICOLON, COMMA = Literal(\";\").suppress(), Literal(\",\").suppress() EQUAL", "IDENTIFIER(\"type\") + IDENTIFIER(\"name\") + LPAREN + Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN + ((nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"code\")) |", "dvec4 fvec2 fvec3 fvec4 sampler1D sampler3D\" \"sampler1DShadow sampler2DShadow\" \"sampler2DRect sampler3DRect sampler2DRectShadow\" \"sizeof cast\"", "code into an abstract syntax list \"\"\" constants = [] structs = []", "SEMICOLON)) FUNCTION.ignore(cStyleComment) # Struct definitions & declarations # --------------------------------- STRUCT = ( Literal(\"struct\").suppress()", 
"\"inline noinline volatile public static extern external\" \"interface flat long short double half", "dvec2 dvec3 dvec4 fvec2 fvec3 fvec4 sampler1D sampler3D\" \"sampler1DShadow sampler2DShadow\" \"sampler2DRect sampler3DRect sampler2DRectShadow\"", "self.code return s class Constant(object): def __init__(self, name, value): self.name = name.strip() self.alias", "Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + IDENTIFIER(\"name\") + LPAREN + Optional(delimitedList(PARAMETER))(\"parameters\") + RPAREN + ((nestedExpr(\"{\",", "parameter.precision, size = parameter.size), name = parameter.name, inout = parameter.inout) parameters.append(P) T =", "token.name, parameters = parameters, code = token.code[0]) functions.append(F) for parameter in parameters: parameter.function", "token.parameters: size = '' if not parameter.size else parameter.size[0] P = Parameter(type =", "def __init__(self, type, name=None, inout=\"in\"): self.type = Type(type) self.name = name.strip() self.alias =", "% self.name if self.type.size: s += \"[%s]\" % self.size return s class Variable(object):", "( Literal(\"struct\").suppress() + IDENTIFIER(\"type\") + nestedExpr(\"{\", \"}\").setParseAction(keepOriginalText)(\"content\") + Optional(VARIABLES) + SEMICOLON) STRUCT.ignore(cStyleComment) #", "precision = token.precision, size = token.size) if token.code: F = Function( type =", "hvec3 hvec4 dvec2 dvec3 dvec4 fvec2 fvec3 fvec4 sampler1D sampler3D\" \"sampler1DShadow sampler2DShadow\" \"sampler2DRect", "base self.base = other.base self.size = other.size self.storage = other.storage self.precision = other.precision", "IDENTIFIER(\"type\") + VARIABLES + SEMICOLON) DECLARATION.ignore(cStyleComment) # Function parameter # ------------------ PARAMETER =", "= name.strip() self.alias = name.strip() self.value = value.strip() def __str__(self): s = \"#define", "not variable.size else variable.size[0] value = '' if not variable.value else variable.value[0] V", "parse(code): \"\"\" 
Parse a GLSL source code into an abstract syntax list \"\"\"", "variable.size[0] value = '' if not variable.value else variable.value[0] V = Variable(Type(base =", "% self.size return s class Variable(object): def __init__(self, type, name, value=None): self.type =", "if not variable.size else variable.size[0] value = '' if not variable.value else variable.value[0]", "% self.type.size if self.value: s += \" = %s\" % self.value s +=", "def __str__(self): s = str(self.type) + \" %s (\" % self.alias for i,", "def __init__(self, type, name, parameters): self.type = Type(type) self.name = name.strip() self.alias =", "------------------- FUNCTION = (Optional(STORAGE_QUALIFIER)(\"storage\") + Optional(PRECISION_QUALIFIER)(\"precision\") + IDENTIFIER(\"type\") + IDENTIFIER(\"name\") + LPAREN +", "def __str__(self): s = \"\" if self.inout: s += \"%s \" % self.inout" ]
[ "the two E_join = append(E_low[0:260],E_high[9:]) Phi_join = append(Phi_low[0:260],Phi_high[9:]) ##### Interpolate to create new", "a nice distribution, but the recoil file ends up being huge (which is", "v_lab/sqrt(sum(v_lab**2.0)) x_rec = array([cos(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), sin(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), costh_r_gen[i]]) costh_r_gen_2[i] = sum(v_lab*x_rec) # Binning costhmin =", "1.0e4 E_fine = linspace(E_join[0],E_nu_max,nfine) Phi_fine = interp(E_fine,E_join,Phi_join) # Generate ngen initial energies and", "large number to # make a nice plot of energy/phi/costh distribution. So everytime", "sin(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), costh_r_gen[i]]) costh_r_gen_2[i] = sum(v_lab*x_rec) # Binning costhmin = 0.0 costh_edges = sqrt(linspace(0.0,1.0,ne+1))", "import os import sys sys.path.append('../src') from numpy import * from numpy import random", "= histogram2d(abs(costh_r_gen_2),log10(E_r_gen),bins=(ne,ne),\\ range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R2 = R_Atm*R2/sum(sum(R2)) DAT1 = vstack((costh_centers,E_r_centers,R1)) DAT2 = vstack((costh_centers,E_r_centers,R2)) recoildat_fname1", "we need a large number to # make a nice plot of energy/phi/costh", "print('Nucleus = ',Nuc.Name) if Nuc.Name=='Xe': E_min = 2.0 E_max = 200.0 elif Nuc.Name=='Ar':", "if Nuc.Name=='Xe': E_min = 2.0 E_max = 200.0 elif Nuc.Name=='Ar': E_min = 20.0", "NeutrinoFuncs import * from LabFuncs import * # This file doesn't save all", "GenerateAtmNuDirections(ngen,E_fine,Phi_fine,E_high,Phi_tot,cosZ,phi_Az,Nuc) # Scatter each neutrino E_r_gen,phi_r_gen,costh_r_gen =\\ ScatterNeutrinos(Nuc,E_gen,phi_nu_gen,costh_nu_gen,E_r_gen) # Window and get angles", "= squeeze(sum(sum(Phi_tot,0),0)) ###### Load low energy FLUKA data dat1 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mubar.txt',delimiter=',') dat2 =", "R_AtmNu(E_min,E_max,Nuc=Nuc,eff_on=False) R1,ce,ee = histogram2d(abs(costh_r_gen),log10(E_r_gen),bins=(ne,ne),\\ 
range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R1 = R_Atm*R1/sum(sum(R1)) R2,ce,ee = histogram2d(abs(costh_r_gen_2),log10(E_r_gen),bins=(ne,ne),\\ range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R2", "= sqrt(linspace(0.0,1.0,ne+1)) costh_centers = (costh_edges[1:]+costh_edges[0:-1])/2.0 E_r_edges = logspace(log10(E_min),log10(E_max),ne+1) E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0 [E,C] =", "# Scatter each neutrino E_r_gen,phi_r_gen,costh_r_gen =\\ ScatterNeutrinos(Nuc,E_gen,phi_nu_gen,costh_nu_gen,E_r_gen) # Window and get angles mask_window", "run the new distribution is merged with a previous one to make it", "loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_ebar.txt',delimiter=',') E_low = dat1[:,0] Phi_low = dat1[:,1]+dat2[:,1]+dat3[:,1]+dat4[:,1] ###### Join the two E_join =", "= dat1[:,0] Phi_low = dat1[:,1]+dat2[:,1]+dat3[:,1]+dat4[:,1] ###### Join the two E_join = append(E_low[0:260],E_high[9:]) Phi_join", "E_max = 200.0 elif Nuc.Name=='Ar': E_min = 20.0 E_max = 400.0 #==============================================================================# ngen", "not in the git repository) #==============================================================================# # Input Nuc = eval(sys.argv[1]) print('Nucleus =", "logspace(log10(E_min),log10(E_max),ne+1) E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0 [E,C] = meshgrid(E_r_centers,costh_centers) eff2 = efficiency(Nuc,E) # Atmospheric neutrino", "sys sys.path.append('../src') from numpy import * from numpy import random from Params import", "to run this file (both Xe131 and Ar40) around 10 times to get", "Ar40) around 10 times to get # a nice distribution, but the recoil", "R_Atm*R2/sum(sum(R2)) DAT1 = vstack((costh_centers,E_r_centers,R1)) DAT2 = vstack((costh_centers,E_r_centers,R2)) recoildat_fname1 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_Stationary.txt' recoildat_fname2 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_CygnusTracking.txt'", "2.0 E_max = 200.0 elif Nuc.Name=='Ar': E_min = 20.0 E_max = 400.0 
#==============================================================================#", "E_min = 20.0 E_max = 400.0 #==============================================================================# ngen = 1000000 fname = 'AtmNu_GranSasso_SolarMin.d'", "from numpy import * from numpy import random from Params import * from", "loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mu.txt',delimiter=',') dat3 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_e.txt',delimiter=',') dat4 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_ebar.txt',delimiter=',') E_low = dat1[:,0] Phi_low = dat1[:,1]+dat2[:,1]+dat3[:,1]+dat4[:,1]", "make a nice plot of energy/phi/costh distribution. So everytime this file # is", "os import sys sys.path.append('../src') from numpy import * from numpy import random from", "= 'AtmNu_GranSasso_SolarMin.d' ne = 20 #### Load high energy data Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname)", "costh_r_gen_2[i] = sum(v_lab*x_rec) # Binning costhmin = 0.0 costh_edges = sqrt(linspace(0.0,1.0,ne+1)) costh_centers =", "DAT2[2:,:] = (DAT_prev2[2:,:]+DAT2[2:,:])/2.0 savetxt(recoildat_fname1,DAT1) savetxt(recoildat_fname2,DAT2) print('merged') else: savetxt(recoildat_fname1,DAT1) savetxt(recoildat_fname2,DAT2) print('overwritten') else: savetxt(recoildat_fname1,DAT1) savetxt(recoildat_fname2,DAT2)", "= costh_r_gen[mask_window] nleft = size(costh_r_gen) print('nleft=',size(costh_r_gen)) print('Generating Cygnus angles') costh_r_gen_2 = zeros(shape=nleft) t_gen", "ne = 20 #### Load high energy data Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname) Phi_high =", "distribution. 
So everytime this file # is run the new distribution is merged", "mask_window = (E_r_gen<=E_max)*(E_r_gen>=E_min) E_r_gen = E_r_gen[mask_window] phi_r_gen = phi_r_gen[mask_window] costh_r_gen = costh_r_gen[mask_window] nleft", "#==============================================================================# # Input Nuc = eval(sys.argv[1]) print('Nucleus = ',Nuc.Name) if Nuc.Name=='Xe': E_min =", "each time. # I needed to run this file (both Xe131 and Ar40)", "each neutrino E_r_gen,phi_r_gen,costh_r_gen =\\ ScatterNeutrinos(Nuc,E_gen,phi_nu_gen,costh_nu_gen,E_r_gen) # Window and get angles mask_window = (E_r_gen<=E_max)*(E_r_gen>=E_min)", "Atmospheric neutrino rate R_Atm = R_AtmNu(E_min,E_max,Nuc=Nuc,eff_on=False) R1,ce,ee = histogram2d(abs(costh_r_gen),log10(E_r_gen),bins=(ne,ne),\\ range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R1 = R_Atm*R1/sum(sum(R1))", "###### Load low energy FLUKA data dat1 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mubar.txt',delimiter=',') dat2 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mu.txt',delimiter=',') dat3", "ngen initial energies and directions E_gen,phi_nu_gen,costh_nu_gen,E_r_gen =\\ GenerateAtmNuDirections(ngen,E_fine,Phi_fine,E_high,Phi_tot,cosZ,phi_Az,Nuc) # Scatter each neutrino E_r_gen,phi_r_gen,costh_r_gen", "nice plot of energy/phi/costh distribution. 
So everytime this file # is run the", "E_gen,phi_nu_gen,costh_nu_gen,E_r_gen =\\ GenerateAtmNuDirections(ngen,E_fine,Phi_fine,E_high,Phi_tot,cosZ,phi_Az,Nuc) # Scatter each neutrino E_r_gen,phi_r_gen,costh_r_gen =\\ ScatterNeutrinos(Nuc,E_gen,phi_nu_gen,costh_nu_gen,E_r_gen) # Window and", "R2 = R_Atm*R2/sum(sum(R2)) DAT1 = vstack((costh_centers,E_r_centers,R1)) DAT2 = vstack((costh_centers,E_r_centers,R2)) recoildat_fname1 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_Stationary.txt' recoildat_fname2", "high energy data Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname) Phi_high = squeeze(sum(sum(Phi_tot,0),0)) ###### Load low energy", "* from LabFuncs import * # This file doesn't save all its recoils", "random.uniform(size=nleft) for i in range(0,nleft): v_lab = LabVelocity(Jan1+67+t_gen[i]) v_lab = v_lab/sqrt(sum(v_lab**2.0)) x_rec =", "costhmin = 0.0 costh_edges = sqrt(linspace(0.0,1.0,ne+1)) costh_centers = (costh_edges[1:]+costh_edges[0:-1])/2.0 E_r_edges = logspace(log10(E_min),log10(E_max),ne+1) E_r_centers", "new distribution is merged with a previous one to make it smoother #", "run this file (both Xe131 and Ar40) around 10 times to get #", "energy FLUKA data dat1 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mubar.txt',delimiter=',') dat2 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mu.txt',delimiter=',') dat3 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_e.txt',delimiter=',') dat4", "=\\ GenerateAtmNuDirections(ngen,E_fine,Phi_fine,E_high,Phi_tot,cosZ,phi_Az,Nuc) # Scatter each neutrino E_r_gen,phi_r_gen,costh_r_gen =\\ ScatterNeutrinos(Nuc,E_gen,phi_nu_gen,costh_nu_gen,E_r_gen) # Window and get", "to get # a nice distribution, but the recoil file ends up being", "= R_AtmNu(E_min,E_max,Nuc=Nuc,eff_on=False) R1,ce,ee = histogram2d(abs(costh_r_gen),log10(E_r_gen),bins=(ne,ne),\\ range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R1 = R_Atm*R1/sum(sum(R1)) R2,ce,ee = histogram2d(abs(costh_r_gen_2),log10(E_r_gen),bins=(ne,ne),\\ 
range=[[0.0,1.0],[log10(E_min),log10(E_max)]])", "if (shape(DAT_prev1)[0]==shape(DAT1)[0])&(shape(DAT_prev1)[1]==shape(DAT1)[1]): DAT1[2:,:] = (DAT_prev1[2:,:]+DAT1[2:,:])/2.0 DAT2[2:,:] = (DAT_prev2[2:,:]+DAT2[2:,:])/2.0 savetxt(recoildat_fname1,DAT1) savetxt(recoildat_fname2,DAT2) print('merged') else: savetxt(recoildat_fname1,DAT1)", "= loadtxt(recoildat_fname2) if (shape(DAT_prev1)[0]==shape(DAT1)[0])&(shape(DAT_prev1)[1]==shape(DAT1)[1]): DAT1[2:,:] = (DAT_prev1[2:,:]+DAT1[2:,:])/2.0 DAT2[2:,:] = (DAT_prev2[2:,:]+DAT2[2:,:])/2.0 savetxt(recoildat_fname1,DAT1) savetxt(recoildat_fname2,DAT2) print('merged')", "# it's not in the git repository) #==============================================================================# # Input Nuc = eval(sys.argv[1])", "nfine = 1000 E_nu_max = 1.0e4 E_fine = linspace(E_join[0],E_nu_max,nfine) Phi_fine = interp(E_fine,E_join,Phi_join) #", "= (costh_edges[1:]+costh_edges[0:-1])/2.0 E_r_edges = logspace(log10(E_min),log10(E_max),ne+1) E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0 [E,C] = meshgrid(E_r_centers,costh_centers) eff2 =", "Join the two E_join = append(E_low[0:260],E_high[9:]) Phi_join = append(Phi_low[0:260],Phi_high[9:]) ##### Interpolate to create", "range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R1 = R_Atm*R1/sum(sum(R1)) R2,ce,ee = histogram2d(abs(costh_r_gen_2),log10(E_r_gen),bins=(ne,ne),\\ range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R2 = R_Atm*R2/sum(sum(R2)) DAT1 =", "recoil file ends up being huge (which is why # it's not in", "= phi_r_gen[mask_window] costh_r_gen = costh_r_gen[mask_window] nleft = size(costh_r_gen) print('nleft=',size(costh_r_gen)) print('Generating Cygnus angles') costh_r_gen_2", "costh_centers = (costh_edges[1:]+costh_edges[0:-1])/2.0 E_r_edges = logspace(log10(E_min),log10(E_max),ne+1) E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0 [E,C] = meshgrid(E_r_centers,costh_centers) eff2", "data Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname) Phi_high = squeeze(sum(sum(Phi_tot,0),0)) 
###### Load low energy FLUKA data", "plot of energy/phi/costh distribution. So everytime this file # is run the new", "because we need a large number to # make a nice plot of", "the recoil file ends up being huge (which is why # it's not", "energy data Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname) Phi_high = squeeze(sum(sum(Phi_tot,0),0)) ###### Load low energy FLUKA", "Cygnus angles') costh_r_gen_2 = zeros(shape=nleft) t_gen = random.uniform(size=nleft) for i in range(0,nleft): v_lab", "loadtxt(recoildat_fname2) if (shape(DAT_prev1)[0]==shape(DAT1)[0])&(shape(DAT_prev1)[1]==shape(DAT1)[1]): DAT1[2:,:] = (DAT_prev1[2:,:]+DAT1[2:,:])/2.0 DAT2[2:,:] = (DAT_prev2[2:,:]+DAT2[2:,:])/2.0 savetxt(recoildat_fname1,DAT1) savetxt(recoildat_fname2,DAT2) print('merged') else:", "append(E_low[0:260],E_high[9:]) Phi_join = append(Phi_low[0:260],Phi_high[9:]) ##### Interpolate to create new array nfine = 1000", "0.0 costh_edges = sqrt(linspace(0.0,1.0,ne+1)) costh_centers = (costh_edges[1:]+costh_edges[0:-1])/2.0 E_r_edges = logspace(log10(E_min),log10(E_max),ne+1) E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0", "time. 
# I needed to run this file (both Xe131 and Ar40) around", "(E_r_edges[1:]+E_r_edges[0:-1])/2.0 [E,C] = meshgrid(E_r_centers,costh_centers) eff2 = efficiency(Nuc,E) # Atmospheric neutrino rate R_Atm =", "dat1[:,1]+dat2[:,1]+dat3[:,1]+dat4[:,1] ###### Join the two E_join = append(E_low[0:260],E_high[9:]) Phi_join = append(Phi_low[0:260],Phi_high[9:]) ##### Interpolate", "zeros(shape=nleft) t_gen = random.uniform(size=nleft) for i in range(0,nleft): v_lab = LabVelocity(Jan1+67+t_gen[i]) v_lab =", "= R_Atm*R1/sum(sum(R1)) R2,ce,ee = histogram2d(abs(costh_r_gen_2),log10(E_r_gen),bins=(ne,ne),\\ range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R2 = R_Atm*R2/sum(sum(R2)) DAT1 = vstack((costh_centers,E_r_centers,R1)) DAT2", "* from NeutrinoFuncs import * from LabFuncs import * # This file doesn't", "* from numpy import random from Params import * from NeutrinoFuncs import *", "Scatter each neutrino E_r_gen,phi_r_gen,costh_r_gen =\\ ScatterNeutrinos(Nuc,E_gen,phi_nu_gen,costh_nu_gen,E_r_gen) # Window and get angles mask_window =", "to make it smoother # each time. 
# I needed to run this", "neutrino rate R_Atm = R_AtmNu(E_min,E_max,Nuc=Nuc,eff_on=False) R1,ce,ee = histogram2d(abs(costh_r_gen),log10(E_r_gen),bins=(ne,ne),\\ range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R1 = R_Atm*R1/sum(sum(R1)) R2,ce,ee", "Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname) Phi_high = squeeze(sum(sum(Phi_tot,0),0)) ###### Load low energy FLUKA data dat1", "file # is run the new distribution is merged with a previous one", "recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_CygnusTracking.txt' file_exists = os.path.exists(recoildat_fname1) if file_exists: DAT_prev1 = loadtxt(recoildat_fname1) DAT_prev2 = loadtxt(recoildat_fname2) if", "neutrino E_r_gen,phi_r_gen,costh_r_gen =\\ ScatterNeutrinos(Nuc,E_gen,phi_nu_gen,costh_nu_gen,E_r_gen) # Window and get angles mask_window = (E_r_gen<=E_max)*(E_r_gen>=E_min) E_r_gen", "= R_Atm*R2/sum(sum(R2)) DAT1 = vstack((costh_centers,E_r_centers,R1)) DAT2 = vstack((costh_centers,E_r_centers,R2)) recoildat_fname1 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_Stationary.txt' recoildat_fname2 =", "20 #### Load high energy data Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname) Phi_high = squeeze(sum(sum(Phi_tot,0),0)) ######", "#### Load high energy data Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname) Phi_high = squeeze(sum(sum(Phi_tot,0),0)) ###### Load", "E_fine = linspace(E_join[0],E_nu_max,nfine) Phi_fine = interp(E_fine,E_join,Phi_join) # Generate ngen initial energies and directions", "= array([cos(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), sin(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), costh_r_gen[i]]) costh_r_gen_2[i] = sum(v_lab*x_rec) # Binning costhmin = 0.0 costh_edges", "(DAT_prev2[2:,:]+DAT2[2:,:])/2.0 savetxt(recoildat_fname1,DAT1) savetxt(recoildat_fname2,DAT2) print('merged') else: savetxt(recoildat_fname1,DAT1) savetxt(recoildat_fname2,DAT2) print('overwritten') else: savetxt(recoildat_fname1,DAT1) savetxt(recoildat_fname2,DAT2) print('first write')", "###### Join the two E_join = 
append(E_low[0:260],E_high[9:]) Phi_join = append(Phi_low[0:260],Phi_high[9:]) ##### Interpolate to", "= vstack((costh_centers,E_r_centers,R1)) DAT2 = vstack((costh_centers,E_r_centers,R2)) recoildat_fname1 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_Stationary.txt' recoildat_fname2 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_CygnusTracking.txt' file_exists =", "I needed to run this file (both Xe131 and Ar40) around 10 times", "= 400.0 #==============================================================================# ngen = 1000000 fname = 'AtmNu_GranSasso_SolarMin.d' ne = 20 ####", "##### Interpolate to create new array nfine = 1000 E_nu_max = 1.0e4 E_fine", "R_Atm = R_AtmNu(E_min,E_max,Nuc=Nuc,eff_on=False) R1,ce,ee = histogram2d(abs(costh_r_gen),log10(E_r_gen),bins=(ne,ne),\\ range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R1 = R_Atm*R1/sum(sum(R1)) R2,ce,ee = histogram2d(abs(costh_r_gen_2),log10(E_r_gen),bins=(ne,ne),\\", "dat3 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_e.txt',delimiter=',') dat4 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_ebar.txt',delimiter=',') E_low = dat1[:,0] Phi_low = dat1[:,1]+dat2[:,1]+dat3[:,1]+dat4[:,1] ######", "vstack((costh_centers,E_r_centers,R2)) recoildat_fname1 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_Stationary.txt' recoildat_fname2 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_CygnusTracking.txt' file_exists = os.path.exists(recoildat_fname1) if file_exists: DAT_prev1", "with a previous one to make it smoother # each time. 
# I", "initial energies and directions E_gen,phi_nu_gen,costh_nu_gen,E_r_gen =\\ GenerateAtmNuDirections(ngen,E_fine,Phi_fine,E_high,Phi_tot,cosZ,phi_Az,Nuc) # Scatter each neutrino E_r_gen,phi_r_gen,costh_r_gen =\\", "elif Nuc.Name=='Ar': E_min = 20.0 E_max = 400.0 #==============================================================================# ngen = 1000000 fname", "need a large number to # make a nice plot of energy/phi/costh distribution.", "E_max = 400.0 #==============================================================================# ngen = 1000000 fname = 'AtmNu_GranSasso_SolarMin.d' ne = 20", "= GetAtmNuFluxes(fname) Phi_high = squeeze(sum(sum(Phi_tot,0),0)) ###### Load low energy FLUKA data dat1 =", "linspace(E_join[0],E_nu_max,nfine) Phi_fine = interp(E_fine,E_join,Phi_join) # Generate ngen initial energies and directions E_gen,phi_nu_gen,costh_nu_gen,E_r_gen =\\", "# each time. # I needed to run this file (both Xe131 and", "E_r_gen[mask_window] phi_r_gen = phi_r_gen[mask_window] costh_r_gen = costh_r_gen[mask_window] nleft = size(costh_r_gen) print('nleft=',size(costh_r_gen)) print('Generating Cygnus", "efficiency(Nuc,E) # Atmospheric neutrino rate R_Atm = R_AtmNu(E_min,E_max,Nuc=Nuc,eff_on=False) R1,ce,ee = histogram2d(abs(costh_r_gen),log10(E_r_gen),bins=(ne,ne),\\ range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R1", "size(costh_r_gen) print('nleft=',size(costh_r_gen)) print('Generating Cygnus angles') costh_r_gen_2 = zeros(shape=nleft) t_gen = random.uniform(size=nleft) for i", "= 2.0 E_max = 200.0 elif Nuc.Name=='Ar': E_min = 20.0 E_max = 400.0", "v_lab = v_lab/sqrt(sum(v_lab**2.0)) x_rec = array([cos(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), sin(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), costh_r_gen[i]]) costh_r_gen_2[i] = sum(v_lab*x_rec) # Binning", "is run the new distribution is merged with a previous one to make", "Phi_low = dat1[:,1]+dat2[:,1]+dat3[:,1]+dat4[:,1] ###### Join the two E_join = append(E_low[0:260],E_high[9:]) Phi_join = 
append(Phi_low[0:260],Phi_high[9:])", "print('nleft=',size(costh_r_gen)) print('Generating Cygnus angles') costh_r_gen_2 = zeros(shape=nleft) t_gen = random.uniform(size=nleft) for i in", "file ends up being huge (which is why # it's not in the", "R1 = R_Atm*R1/sum(sum(R1)) R2,ce,ee = histogram2d(abs(costh_r_gen_2),log10(E_r_gen),bins=(ne,ne),\\ range=[[0.0,1.0],[log10(E_min),log10(E_max)]]) R2 = R_Atm*R2/sum(sum(R2)) DAT1 = vstack((costh_centers,E_r_centers,R1))", "= LabVelocity(Jan1+67+t_gen[i]) v_lab = v_lab/sqrt(sum(v_lab**2.0)) x_rec = array([cos(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), sin(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), costh_r_gen[i]]) costh_r_gen_2[i] = sum(v_lab*x_rec)", "a nice plot of energy/phi/costh distribution. So everytime this file # is run", "',Nuc.Name) if Nuc.Name=='Xe': E_min = 2.0 E_max = 200.0 elif Nuc.Name=='Ar': E_min =", "is why # it's not in the git repository) #==============================================================================# # Input Nuc", "E_r_gen = E_r_gen[mask_window] phi_r_gen = phi_r_gen[mask_window] costh_r_gen = costh_r_gen[mask_window] nleft = size(costh_r_gen) print('nleft=',size(costh_r_gen))", "doesn't save all its recoils because we need a large number to #", "import * from numpy import random from Params import * from NeutrinoFuncs import", "= eval(sys.argv[1]) print('Nucleus = ',Nuc.Name) if Nuc.Name=='Xe': E_min = 2.0 E_max = 200.0", "the git repository) #==============================================================================# # Input Nuc = eval(sys.argv[1]) print('Nucleus = ',Nuc.Name) if", "= logspace(log10(E_min),log10(E_max),ne+1) E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0 [E,C] = meshgrid(E_r_centers,costh_centers) eff2 = efficiency(Nuc,E) # Atmospheric", "= loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mubar.txt',delimiter=',') dat2 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mu.txt',delimiter=',') dat3 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_e.txt',delimiter=',') 
dat4 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_ebar.txt',delimiter=',') E_low =", "1000000 fname = 'AtmNu_GranSasso_SolarMin.d' ne = 20 #### Load high energy data Phi_tot,E_high,cosZ,phi_Az", "print('Generating Cygnus angles') costh_r_gen_2 = zeros(shape=nleft) t_gen = random.uniform(size=nleft) for i in range(0,nleft):", "200.0 elif Nuc.Name=='Ar': E_min = 20.0 E_max = 400.0 #==============================================================================# ngen = 1000000", "* # This file doesn't save all its recoils because we need a", "a large number to # make a nice plot of energy/phi/costh distribution. So", "repository) #==============================================================================# # Input Nuc = eval(sys.argv[1]) print('Nucleus = ',Nuc.Name) if Nuc.Name=='Xe': E_min", "nice distribution, but the recoil file ends up being huge (which is why", "LabVelocity(Jan1+67+t_gen[i]) v_lab = v_lab/sqrt(sum(v_lab**2.0)) x_rec = array([cos(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), sin(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0), costh_r_gen[i]]) costh_r_gen_2[i] = sum(v_lab*x_rec) #", "= sum(v_lab*x_rec) # Binning costhmin = 0.0 costh_edges = sqrt(linspace(0.0,1.0,ne+1)) costh_centers = (costh_edges[1:]+costh_edges[0:-1])/2.0", "import * from NeutrinoFuncs import * from LabFuncs import * # This file", "'AtmNu_GranSasso_SolarMin.d' ne = 20 #### Load high energy data Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname) Phi_high", "= 200.0 elif Nuc.Name=='Ar': E_min = 20.0 E_max = 400.0 #==============================================================================# ngen =", "up being huge (which is why # it's not in the git repository)", "around 10 times to get # a nice distribution, but the recoil file", "= append(Phi_low[0:260],Phi_high[9:]) ##### Interpolate to create new array nfine = 1000 E_nu_max =", "Generate ngen initial energies and directions E_gen,phi_nu_gen,costh_nu_gen,E_r_gen =\\ 
import os
import sys
sys.path.append('../src')
from numpy import *
from numpy import random
from Params import *
from NeutrinoFuncs import *
from LabFuncs import *

#==============================================================================#
# Generate atmospheric-neutrino-induced nuclear recoils for one target nucleus
# and histogram them in (|cos(theta)|, log10(E_r)), once for a stationary
# detector and once with recoil angles measured relative to the lab velocity
# direction ("Cygnus tracking").
#
# This file doesn't save all its recoils because we need a large number to
# make a nice plot of energy/phi/costh distribution. So every time this file
# is run the new distribution is merged with a previous one to make it
# smoother each time.
# I needed to run this file (both Xe131 and Ar40) around 10 times to get
# a nice distribution, but the recoil file ends up being huge (which is why
# it's not in the git repository)
#==============================================================================#

# Input: name of a nucleus object defined in Params (e.g. 'Xe131').
# SECURITY NOTE(review): eval() on a command-line argument executes arbitrary
# code. Acceptable for a private analysis script, but never expose this entry
# point to untrusted input.
Nuc = eval(sys.argv[1])
print('Nucleus = ',Nuc.Name)

# Recoil-energy analysis window (keV) per target
if Nuc.Name=='Xe':
    E_min = 2.0
    E_max = 200.0
elif Nuc.Name=='Ar':
    E_min = 20.0
    E_max = 400.0
else:
    # Previously fell through silently and crashed later with a NameError on
    # E_min/E_max; fail fast with a clear message instead.
    raise ValueError('Unsupported nucleus: '+Nuc.Name)
#==============================================================================#

ngen = 1000000                         # neutrinos generated per run
fname = 'AtmNu_GranSasso_SolarMin.d'   # high-energy atmospheric flux table
ne = 20                                # bins per axis of the 2D histograms

#### Load high energy data
Phi_tot,E_high,cosZ,phi_Az = GetAtmNuFluxes(fname)
# Collapse both arrival-direction axes to get the energy spectrum alone
Phi_high = squeeze(sum(sum(Phi_tot,0),0))

###### Load low energy FLUKA data (four flavours, summed below)
dat1 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mubar.txt',delimiter=',')
dat2 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_mu.txt',delimiter=',')
dat3 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_e.txt',delimiter=',')
dat4 = loadtxt(nufile_dir+'/atmospheric/FLUKA/AtmNu_ebar.txt',delimiter=',')
E_low = dat1[:,0]
Phi_low = dat1[:,1]+dat2[:,1]+dat3[:,1]+dat4[:,1]

###### Join the two (FLUKA at low energy, tabulated flux above the crossover;
# the index cuts 260 / 9 pick the switch-over points of the two tables)
E_join = append(E_low[0:260],E_high[9:])
Phi_join = append(Phi_low[0:260],Phi_high[9:])

##### Interpolate to create new array on a uniform fine grid
nfine = 1000
E_nu_max = 1.0e4
E_fine = linspace(E_join[0],E_nu_max,nfine)
Phi_fine = interp(E_fine,E_join,Phi_join)

# Generate ngen initial energies and directions
E_gen,phi_nu_gen,costh_nu_gen,E_r_gen =\
    GenerateAtmNuDirections(ngen,E_fine,Phi_fine,E_high,Phi_tot,cosZ,phi_Az,Nuc)

# Scatter each neutrino
E_r_gen,phi_r_gen,costh_r_gen =\
    ScatterNeutrinos(Nuc,E_gen,phi_nu_gen,costh_nu_gen,E_r_gen)

# Window and get angles: keep only recoils inside [E_min, E_max]
mask_window = (E_r_gen<=E_max)*(E_r_gen>=E_min)
E_r_gen = E_r_gen[mask_window]
phi_r_gen = phi_r_gen[mask_window]
costh_r_gen = costh_r_gen[mask_window]
nleft = size(costh_r_gen)
print('nleft=',nleft)   # was size(costh_r_gen) again -- same value, computed once

print('Generating Cygnus angles')
costh_r_gen_2 = zeros(shape=nleft)
# Each event gets a random fractional time offset; LabVelocity is evaluated
# at Jan1+67+t_gen[i] (presumably days -- TODO confirm against LabFuncs).
t_gen = random.uniform(size=nleft)
for i in range(nleft):
    v_lab = LabVelocity(Jan1+67+t_gen[i])
    v_lab = v_lab/sqrt(sum(v_lab**2.0))      # unit vector of lab velocity
    # Unit vector of the recoil direction in detector coordinates
    x_rec = array([cos(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0),
                   sin(phi_r_gen[i])*sqrt(1-costh_r_gen[i]**2.0),
                   costh_r_gen[i]])
    # Cosine of the angle between recoil and lab-velocity direction
    costh_r_gen_2[i] = sum(v_lab*x_rec)

# Binning
costhmin = 0.0   # NOTE(review): appears unused in this section; kept as-is
# sqrt spacing: costh bin edges narrow towards |costh| = 1
costh_edges = sqrt(linspace(0.0,1.0,ne+1))
costh_centers = (costh_edges[1:]+costh_edges[0:-1])/2.0
E_r_edges = logspace(log10(E_min),log10(E_max),ne+1)
E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0
[E,C] = meshgrid(E_r_centers,costh_centers)
eff2 = efficiency(Nuc,E)   # detector efficiency evaluated on the bin-centre grid

# Atmospheric neutrino rate: each histogram is normalised so its sum equals
# the total rate R_Atm in the analysis window.
R_Atm = R_AtmNu(E_min,E_max,Nuc=Nuc,eff_on=False)
R1,ce,ee = histogram2d(abs(costh_r_gen),log10(E_r_gen),bins=(ne,ne),\
                    range=[[0.0,1.0],[log10(E_min),log10(E_max)]])
R1 = R_Atm*R1/sum(sum(R1))
R2,ce,ee = histogram2d(abs(costh_r_gen_2),log10(E_r_gen),bins=(ne,ne),\
                    range=[[0.0,1.0],[log10(E_min),log10(E_max)]])
R2 = R_Atm*R2/sum(sum(R2))

# Stack bin centres on top of the rate tables for saving (rows 0,1 = axes,
# rows 2: = rates -- matches the [2:,:] slicing used when merging runs).
DAT1 = vstack((costh_centers,E_r_centers,R1))
DAT2 = vstack((costh_centers,E_r_centers,R2))
recoildat_fname1 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_Stationary.txt'
recoildat_fname2 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_CygnusTracking.txt'
file_exists = os.path.exists(recoildat_fname1)
squeeze(sum(sum(Phi_tot,0),0))", "ngen = 1000000 fname = 'AtmNu_GranSasso_SolarMin.d' ne = 20 #### Load high energy", "why # it's not in the git repository) #==============================================================================# # Input Nuc =", "(E_r_gen<=E_max)*(E_r_gen>=E_min) E_r_gen = E_r_gen[mask_window] phi_r_gen = phi_r_gen[mask_window] costh_r_gen = costh_r_gen[mask_window] nleft = size(costh_r_gen)", "previous one to make it smoother # each time. # I needed to", "sqrt(linspace(0.0,1.0,ne+1)) costh_centers = (costh_edges[1:]+costh_edges[0:-1])/2.0 E_r_edges = logspace(log10(E_min),log10(E_max),ne+1) E_r_centers = (E_r_edges[1:]+E_r_edges[0:-1])/2.0 [E,C] = meshgrid(E_r_centers,costh_centers)", "random from Params import * from NeutrinoFuncs import * from LabFuncs import *", "recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_Stationary.txt' recoildat_fname2 = recoil_dir+'AtmNu_Ecosth_'+Nuc.Name+'_CygnusTracking.txt' file_exists = os.path.exists(recoildat_fname1) if file_exists: DAT_prev1 = loadtxt(recoildat_fname1) DAT_prev2", "new array nfine = 1000 E_nu_max = 1.0e4 E_fine = linspace(E_join[0],E_nu_max,nfine) Phi_fine =", "Input Nuc = eval(sys.argv[1]) print('Nucleus = ',Nuc.Name) if Nuc.Name=='Xe': E_min = 2.0 E_max" ]
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
# NOTE(review): scrapy.conf and scrapy.log are deprecated (removed in modern
# Scrapy); kept here because replacing them changes runtime behaviour.
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log


class CoinnewsPipeline(object):
    """Item pipeline that stores scraped coin articles in MongoDB.

    Connection parameters are read from the crawler settings
    (``MONGO_URI`` and ``MONGO_DATABASE``) via :meth:`from_crawler`.
    Items with any empty field are dropped.
    """

    collection_name = 'coin_articles'

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        # Alternate constructor used by Scrapy: pull connection parameters
        # from project settings; database name falls back to 'items'.
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DATABASE', 'items')
        )

    def open_spider(self, spider):
        # One client per spider run; released again in close_spider().
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        # BUG FIX: iterating an item yields its field *names*, which are
        # non-empty strings, so the original `if not data` check could never
        # fire. Validate the field *values* instead.
        for field in item:
            if not item[field]:
                raise DropItem("Missing data!")
        self.db[self.collection_name].insert_one(dict(item))
        log.msg("Question added to MongoDB database!",
                level=log.DEBUG, spider=spider)
        return item
to add", "def process_item(self, item, spider): for data in item: if not data: raise DropItem(\"Missing", "log class CoinnewsPipeline(object): collection_name = 'coin_articles' def __init__(self, mongo_uri, mongo_db): self.mongo_uri = mongo_uri", "in item: if not data: raise DropItem(\"Missing data!\") self.db[self.collection_name].insert_one(dict(item)) log.msg(\"Question added to MongoDB", "__init__(self, mongo_uri, mongo_db): self.mongo_uri = mongo_uri self.mongo_db = mongo_db @classmethod def from_crawler(cls, crawler):" ]
[ "string is preceded by newline. # Translators: This is a helpful comment. _(", "is preceded by newline. # Translators: This is a helpful comment. _( '4')", "<filename>tests/shunit/data/bad_i18n_newline_4.py # Single-quoted string is preceded by newline. # Translators: This is a", "Single-quoted string is preceded by newline. # Translators: This is a helpful comment.", "# Single-quoted string is preceded by newline. # Translators: This is a helpful" ]
[ "== 10: verifyDig = 1 elif resTicket == 1: verifyDig = 0 elif", "= (t01 + t02 + t03 + t04 + t05 + t06 +", "verifyTicket(ticket): ticket = ticket[::-1] t01 = int(ticket[0]) * 2 t02 = int(ticket[1]) *", "+ t03 + t04 + t05 + t06 + t07 + t08 +", "resTicket == 1: verifyDig = 0 elif resTicket == 0: verifyDig = 0", "1: verifyDig = 0 elif resTicket == 0: verifyDig = 0 else: verifyDig", "int(ticket[11]) * 5 sumTicket = (t01 + t02 + t03 + t04 +", "* 7 t07 = int(ticket[6]) * 8 t08 = int(ticket[7]) * 9 t09", "== 0: verifyDig = 0 else: verifyDig = (11 - resTicket) return verifyDig", "t05 = int(ticket[4]) * 6 t06 = int(ticket[5]) * 7 t07 = int(ticket[6])", "= int(ticket[9]) * 3 t11 = int(ticket[10]) * 4 t12 = int(ticket[11]) *", "10: verifyDig = 1 elif resTicket == 1: verifyDig = 0 elif resTicket", "6 t06 = int(ticket[5]) * 7 t07 = int(ticket[6]) * 8 t08 =", "4 t04 = int(ticket[3]) * 5 t05 = int(ticket[4]) * 6 t06 =", "+ t05 + t06 + t07 + t08 + t09 + t10 +", "= int(ticket[7]) * 9 t09 = int(ticket[8]) * 2 t10 = int(ticket[9]) *", "= sumTicket % 11 verifyDig = 0 if resTicket == 10: verifyDig =", "verifyDig = 1 elif resTicket == 1: verifyDig = 0 elif resTicket ==", "t02 = int(ticket[1]) * 3 t03 = int(ticket[2]) * 4 t04 = int(ticket[3])", "= int(ticket[6]) * 8 t08 = int(ticket[7]) * 9 t09 = int(ticket[8]) *", "+ t06 + t07 + t08 + t09 + t10 + t11 +", "* 9 t09 = int(ticket[8]) * 2 t10 = int(ticket[9]) * 3 t11", "t09 = int(ticket[8]) * 2 t10 = int(ticket[9]) * 3 t11 = int(ticket[10])", "ticket[::-1] t01 = int(ticket[0]) * 2 t02 = int(ticket[1]) * 3 t03 =", "t06 = int(ticket[5]) * 7 t07 = int(ticket[6]) * 8 t08 = int(ticket[7])", "+ t09 + t10 + t11 + t12) resTicket = sumTicket % 11", "5 t05 = int(ticket[4]) * 6 t06 = int(ticket[5]) * 7 t07 =", "resTicket = sumTicket % 11 verifyDig = 0 if resTicket == 10: verifyDig", "int(ticket[5]) * 7 t07 = int(ticket[6]) * 8 t08 = int(ticket[7]) * 9", "resTicket == 10: verifyDig = 1 elif resTicket == 1: verifyDig = 0", "ticket = ticket[::-1] t01 = 
int(ticket[0]) * 2 t02 = int(ticket[1]) * 3", "int(ticket[1]) * 3 t03 = int(ticket[2]) * 4 t04 = int(ticket[3]) * 5", "t04 + t05 + t06 + t07 + t08 + t09 + t10", "= 0 elif resTicket == 0: verifyDig = 0 else: verifyDig = (11", "t06 + t07 + t08 + t09 + t10 + t11 + t12)", "% 11 verifyDig = 0 if resTicket == 10: verifyDig = 1 elif", "2 t10 = int(ticket[9]) * 3 t11 = int(ticket[10]) * 4 t12 =", "* 5 sumTicket = (t01 + t02 + t03 + t04 + t05", "resTicket == 0: verifyDig = 0 else: verifyDig = (11 - resTicket) return", "= int(ticket[0]) * 2 t02 = int(ticket[1]) * 3 t03 = int(ticket[2]) *", "= 1 elif resTicket == 1: verifyDig = 0 elif resTicket == 0:", "t02 + t03 + t04 + t05 + t06 + t07 + t08", "* 2 t10 = int(ticket[9]) * 3 t11 = int(ticket[10]) * 4 t12", "int(ticket[8]) * 2 t10 = int(ticket[9]) * 3 t11 = int(ticket[10]) * 4", "int(ticket[2]) * 4 t04 = int(ticket[3]) * 5 t05 = int(ticket[4]) * 6", "def verifyTicket(ticket): ticket = ticket[::-1] t01 = int(ticket[0]) * 2 t02 = int(ticket[1])", "t10 = int(ticket[9]) * 3 t11 = int(ticket[10]) * 4 t12 = int(ticket[11])", "sumTicket = (t01 + t02 + t03 + t04 + t05 + t06", "0 elif resTicket == 0: verifyDig = 0 else: verifyDig = (11 -", "int(ticket[9]) * 3 t11 = int(ticket[10]) * 4 t12 = int(ticket[11]) * 5", "2 t02 = int(ticket[1]) * 3 t03 = int(ticket[2]) * 4 t04 =", "+ t10 + t11 + t12) resTicket = sumTicket % 11 verifyDig =", "t11 + t12) resTicket = sumTicket % 11 verifyDig = 0 if resTicket", "= int(ticket[3]) * 5 t05 = int(ticket[4]) * 6 t06 = int(ticket[5]) *", "= int(ticket[4]) * 6 t06 = int(ticket[5]) * 7 t07 = int(ticket[6]) *", "= int(ticket[2]) * 4 t04 = int(ticket[3]) * 5 t05 = int(ticket[4]) *", "0 if resTicket == 10: verifyDig = 1 elif resTicket == 1: verifyDig", "verifyDig = 0 if resTicket == 10: verifyDig = 1 elif resTicket ==", "+ t07 + t08 + t09 + t10 + t11 + t12) resTicket", "t12) resTicket = sumTicket % 11 verifyDig = 0 if resTicket == 10:", "t11 = int(ticket[10]) * 4 t12 = int(ticket[11]) * 5 sumTicket = (t01", 
"t07 = int(ticket[6]) * 8 t08 = int(ticket[7]) * 9 t09 = int(ticket[8])", "= int(ticket[11]) * 5 sumTicket = (t01 + t02 + t03 + t04", "t03 = int(ticket[2]) * 4 t04 = int(ticket[3]) * 5 t05 = int(ticket[4])", "t03 + t04 + t05 + t06 + t07 + t08 + t09", "(t01 + t02 + t03 + t04 + t05 + t06 + t07", "+ t08 + t09 + t10 + t11 + t12) resTicket = sumTicket", "= int(ticket[10]) * 4 t12 = int(ticket[11]) * 5 sumTicket = (t01 +", "= int(ticket[8]) * 2 t10 = int(ticket[9]) * 3 t11 = int(ticket[10]) *", "9 t09 = int(ticket[8]) * 2 t10 = int(ticket[9]) * 3 t11 =", "* 4 t12 = int(ticket[11]) * 5 sumTicket = (t01 + t02 +", "elif resTicket == 1: verifyDig = 0 elif resTicket == 0: verifyDig =", "* 2 t02 = int(ticket[1]) * 3 t03 = int(ticket[2]) * 4 t04", "t09 + t10 + t11 + t12) resTicket = sumTicket % 11 verifyDig", "int(ticket[10]) * 4 t12 = int(ticket[11]) * 5 sumTicket = (t01 + t02", "int(ticket[6]) * 8 t08 = int(ticket[7]) * 9 t09 = int(ticket[8]) * 2", "+ t11 + t12) resTicket = sumTicket % 11 verifyDig = 0 if", "int(ticket[0]) * 2 t02 = int(ticket[1]) * 3 t03 = int(ticket[2]) * 4", "5 sumTicket = (t01 + t02 + t03 + t04 + t05 +", "t10 + t11 + t12) resTicket = sumTicket % 11 verifyDig = 0", "t04 = int(ticket[3]) * 5 t05 = int(ticket[4]) * 6 t06 = int(ticket[5])", "* 4 t04 = int(ticket[3]) * 5 t05 = int(ticket[4]) * 6 t06", "int(ticket[7]) * 9 t09 = int(ticket[8]) * 2 t10 = int(ticket[9]) * 3", "* 8 t08 = int(ticket[7]) * 9 t09 = int(ticket[8]) * 2 t10", "int(ticket[4]) * 6 t06 = int(ticket[5]) * 7 t07 = int(ticket[6]) * 8", "if resTicket == 10: verifyDig = 1 elif resTicket == 1: verifyDig =", "8 t08 = int(ticket[7]) * 9 t09 = int(ticket[8]) * 2 t10 =", "* 3 t11 = int(ticket[10]) * 4 t12 = int(ticket[11]) * 5 sumTicket", "3 t03 = int(ticket[2]) * 4 t04 = int(ticket[3]) * 5 t05 =", "t08 + t09 + t10 + t11 + t12) resTicket = sumTicket %", "verifyDig = 0 elif resTicket == 0: verifyDig = 0 else: verifyDig =", "* 3 t03 = int(ticket[2]) * 4 t04 = int(ticket[3]) * 5 t05", "elif resTicket == 
0: verifyDig = 0 else: verifyDig = (11 - resTicket)", "+ t04 + t05 + t06 + t07 + t08 + t09 +", "t07 + t08 + t09 + t10 + t11 + t12) resTicket =", "+ t02 + t03 + t04 + t05 + t06 + t07 +", "= int(ticket[1]) * 3 t03 = int(ticket[2]) * 4 t04 = int(ticket[3]) *", "= 0 if resTicket == 10: verifyDig = 1 elif resTicket == 1:", "1 elif resTicket == 1: verifyDig = 0 elif resTicket == 0: verifyDig", "t12 = int(ticket[11]) * 5 sumTicket = (t01 + t02 + t03 +", "= ticket[::-1] t01 = int(ticket[0]) * 2 t02 = int(ticket[1]) * 3 t03", "= int(ticket[5]) * 7 t07 = int(ticket[6]) * 8 t08 = int(ticket[7]) *", "* 6 t06 = int(ticket[5]) * 7 t07 = int(ticket[6]) * 8 t08", "int(ticket[3]) * 5 t05 = int(ticket[4]) * 6 t06 = int(ticket[5]) * 7", "3 t11 = int(ticket[10]) * 4 t12 = int(ticket[11]) * 5 sumTicket =", "t08 = int(ticket[7]) * 9 t09 = int(ticket[8]) * 2 t10 = int(ticket[9])", "* 5 t05 = int(ticket[4]) * 6 t06 = int(ticket[5]) * 7 t07", "11 verifyDig = 0 if resTicket == 10: verifyDig = 1 elif resTicket", "== 1: verifyDig = 0 elif resTicket == 0: verifyDig = 0 else:", "7 t07 = int(ticket[6]) * 8 t08 = int(ticket[7]) * 9 t09 =", "t05 + t06 + t07 + t08 + t09 + t10 + t11", "+ t12) resTicket = sumTicket % 11 verifyDig = 0 if resTicket ==", "4 t12 = int(ticket[11]) * 5 sumTicket = (t01 + t02 + t03", "sumTicket % 11 verifyDig = 0 if resTicket == 10: verifyDig = 1", "t01 = int(ticket[0]) * 2 t02 = int(ticket[1]) * 3 t03 = int(ticket[2])" ]
[ "to_insert in matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes = [trial_match['hash'] for trial_match in matches_to_mark_available] for chunk", "if protocol_no not in matchengine.matches or protocol_no not in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was", "-> list: \"\"\" Get matches to disable by looking for existing, enabled matches", "db which have the same hashes as newly found matches. :param matchengine: :param", "matches during current run. Done for every sample_id :param matchengine: :param new_matches_hashes: :param", "'clinical_id': 1 } results = await perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query), projection=projection) return [result for", "{'$in': chunk}}, update={'$set': {'is_disabled': False, '_updated': updated_time}})) return ops def get_matches_to_insert(matches_by_sample_id: list, existing_hashes:", "in existing if result['is_disabled']} # insert new matches if they don't already exist.", "projection=projection) return [result for result in results] async def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine)", "= {result['hash'] for result in existing} disabled = {result['hash'] for result in existing", "- clinical_ids query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\": { '$in': [clinical_id for clinical_id", "have the same hashes as newly found matches. 
:param matchengine: :param new_matches_hashes: :return:", "get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list) -> list: \"\"\" Get matches in db which have", "chunk_list from matchengine.internals.utilities.utilities import perform_db_call logging.basicConfig(level=logging.INFO) log = logging.getLogger('matchengine') if TYPE_CHECKING: from matchengine.internals.engine", "issue queries matches_to_disable = await get_all_except(matchengine, protocol_no, matches_by_sample_id) delete_ops = await get_delete_ops(matches_to_disable, matchengine)", "'sample_id': sample_id, 'is_disabled': False, 'hash': { '$nin': new_matches_hashes } } matches_to_disable_query = MongoQuery(query)", "ops = [InsertOne(document=trial_match) for trial_match in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no))", "clinical ids with matches clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()} # if", "for matches in matches_by_sample_id.values(): for match in matches: match['_updated'] = updated_time if protocol_no", "in clinical_ids] } } projection = { '_id': 1, 'hash': 1, 'clinical_id': 1", "not in the current run. 
if protocol_no in matchengine.clinical_run_log_entries: clinical_ids = matchengine.clinical_run_log_entries[protocol_no] -", "= await get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for sample_id in matches_by_sample_id.keys(): if not matchengine.drop:", "matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for sample_id in matches_by_sample_id.keys(): if not matchengine.drop: new_matches_hashes = [match['hash']", "list) -> list: \"\"\" Get matches in db which have the same hashes", "existing matches in db with identical hashes to newly found matches existing =", "in matches_to_disable] ops = list() for chunk in chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}},", "db with identical hashes to newly found matches existing = await get_existing_matches(matchengine, new_matches_hashes)", "import perform_db_call logging.basicConfig(level=logging.INFO) log = logging.getLogger('matchengine') if TYPE_CHECKING: from matchengine.internals.engine import MatchEngine async", "if protocol_no in matchengine.clinical_run_log_entries: clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids query = { matchengine.match_criteria_transform.match_trial_link_id:", "new match generated during run matches hash of an existing matches_to_mark_available = [m", "dict) -> list: \"\"\"Return all matches except ones matching current protocol_no\"\"\" # get", "{'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})) return ops async def get_existing_matches(matchengine: MatchEngine,", "'_updated': updated_time}})) return ops def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set, sample_id: str) -> list:", "if protocol has been run previously, subtract clinical ids from current run from", "MongoQuery from matchengine.internals.utilities.list_utils import chunk_list from 
matchengine.internals.utilities.utilities import perform_db_call logging.basicConfig(level=logging.INFO) log = logging.getLogger('matchengine')", "the same hashes as newly found matches. :param matchengine: :param new_matches_hashes: :return: \"\"\"", ":param new_matches_hashes: :param protocol_no: :param sample_id: :return: \"\"\" query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no,", "matchengine) else: ops = [InsertOne(document=trial_match) for trial_match in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if not", "Update trial matches by diff'ing the newly created trial matches against existing matches", "'_updated': updated_time}})], protocol_no ) ) else: # Get matches to disable and issue", "projection = { '_id': 1, 'hash': 1, 'clinical_id': 1 } results = await", "insert new matches if they don't already exist. disable everything else matches_to_insert =", "protocol_no, sample_id) # flip is_disabled flag if a new match generated during run", "to disable and issue queries matches_to_disable = await get_all_except(matchengine, protocol_no, matches_by_sample_id) delete_ops =", "ops.append(InsertOne(document=to_insert)) available_hashes = [trial_match['hash'] for trial_match in matches_to_mark_available] for chunk in chunk_list(available_hashes, matchengine.chunk_size):", "run. 
if protocol_no in matchengine.clinical_run_log_entries: clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids query = {", "else: ops = [InsertOne(document=trial_match) for trial_match in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if not matchengine.skip_run_log_entry:", "{matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()} # if protocol has been run previously, subtract", "import logging from typing import TYPE_CHECKING from pymongo import UpdateMany, InsertOne from matchengine.internals.typing.matchengine_types", "in results] async def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) -> list: updated_time = datetime.datetime.now()", "not in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched on, not updating {matchengine.match_criteria_transform.trial_collection} matches\")", "if a new match generated during run matches hash of an existing matches_to_mark_available", "matches to disable by looking for existing, enabled matches whose hashes are not", "= await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query, projection) ) return matches[0] def get_update_operations(matches_to_disable: list,", "chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': False, '_updated': updated_time}})) return ops def", "import UpdateMany, InsertOne from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery from matchengine.internals.utilities.list_utils import chunk_list", "1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query, projection) ) return matches[0] def", "ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, 
update={'$set': {'is_disabled': False, '_updated': updated_time}})) return ops def get_matches_to_insert(matches_by_sample_id: list,", "match in matches: match['_updated'] = updated_time if protocol_no not in matchengine.matches or protocol_no", "UpdateMany, InsertOne from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery from matchengine.internals.utilities.list_utils import chunk_list from", "\"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query, projection) ) return matches[0]", "{\"is_disabled\": True, '_updated': updated_time}})) return ops async def get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list) ->", "run matches hash of an existing matches_to_mark_available = [m for m in matches_by_sample_id[sample_id]", "dict()) updated_time = datetime.datetime.now() for matches in matches_by_sample_id.values(): for match in matches: match['_updated']", "projection = {\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query,", "{'is_disabled': True, '_updated': updated_time}})) for to_insert in matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes = [trial_match['hash'] for", "True, '_updated': updated_time}})], protocol_no ) ) else: # Get matches to disable and", "not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() async def get_all_except(matchengine: MatchEngine, protocol_no: str, trial_matches_by_sample_id: dict)", ") return matches[0] async def get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes: list, protocol_no: str, sample_id: str)", "matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) 
await matchengine.task_q.join() async def get_all_except(matchengine: MatchEngine, protocol_no:", "{'$in': new_matches_hashes}}) projection = {\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine,", "chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': True, '_updated': updated_time}})) for to_insert in", "matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': True, '_updated': updated_time}})) for to_insert in matches_to_insert:", "[clinical_id for clinical_id in clinical_ids] } } projection = { '_id': 1, 'hash':", "existing if result['is_disabled']} # insert new matches if they don't already exist. disable", "updating {matchengine.match_criteria_transform.trial_collection} matches\") if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() return log.info(f\"Updating matches for", "run previously, but not in the current run. if protocol_no in matchengine.clinical_run_log_entries: clinical_ids", "list, matchengine: MatchEngine) -> list: ops = list() updated_time = datetime.datetime.now() disable_hashes =", "ones matching current protocol_no\"\"\" # get clinical ids with matches clinical_ids = {matchengine.sample_mapping[sample_id]", "the newly created trial matches against existing matches in the db. 
Delete matches", "False, 'hash': { '$nin': new_matches_hashes } } matches_to_disable_query = MongoQuery(query) projection = {\"hash\":", "result in matches_to_disable] ops = list() for chunk in chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in':", "updated_time = datetime.datetime.now() hashes = [result['hash'] for result in matches_to_disable] ops = list()", "await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query, projection) ) return matches[0] async def get_matches_to_disable(matchengine: MatchEngine,", "disable by looking for existing, enabled matches whose hashes are not present in", "protocol_no, \"clinical_id\": { '$in': [clinical_id for clinical_id in clinical_ids] } } projection =", "= await get_matches_to_disable(matchengine, new_matches_hashes, protocol_no, sample_id) # flip is_disabled flag if a new", "= logging.getLogger('matchengine') if TYPE_CHECKING: from matchengine.internals.engine import MatchEngine async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no:", "\"\"\"Return all matches except ones matching current protocol_no\"\"\" # get clinical ids with", "for {protocol_no}\") if not matchengine.drop: # If no matches are found, disable all", "Get matches to disable and issue queries matches_to_disable = await get_all_except(matchengine, protocol_no, matches_by_sample_id)", "'$in': [clinical_id for clinical_id in clinical_ids] } } projection = { '_id': 1,", "in db which have the same hashes as newly found matches. :param matchengine:", "by adding {is_disabled: true} and insert all new matches. 
\"\"\" matches_by_sample_id = matchengine.matches.get(protocol_no,", "matchengine.drop: # If no matches are found, disable all match records by sample", "matchengine.matches[protocol_no]: for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id': {'$in': chunk}},", "matches to disable and issue queries matches_to_disable = await get_all_except(matchengine, protocol_no, matches_by_sample_id) delete_ops", "protocol_no ) ) else: # Get matches to disable and issue queries matches_to_disable", "not matched on, not updating {matchengine.match_criteria_transform.trial_collection} matches\") if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join()", "during run matches hash of an existing matches_to_mark_available = [m for m in", "sample_id: str) -> list: return [m for m in matches_by_sample_id[sample_id] if m['hash'] not", "'clinical_id': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})], protocol_no ) ) else: #", "matchengine.trial_match_collection, matches_to_disable_query, projection) ) return matches[0] def get_update_operations(matches_to_disable: list, matches_to_insert: list, matches_to_mark_available: list,", "matches are found, disable all match records by sample id if not matchengine.matches[protocol_no]:", "chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})], protocol_no ) ) else: # Get matches", "return log.info(f\"Updating matches for {protocol_no}\") if not matchengine.drop: # If no matches are", "matches_to_disable_query = MongoQuery(query) projection = {\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather(", "matchengine.trial_match_collection, matches_to_not_change_query, projection) ) return matches[0] async 
def get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes: list, protocol_no:", "from matchengine.internals.utilities.list_utils import chunk_list from matchengine.internals.utilities.utilities import perform_db_call logging.basicConfig(level=logging.INFO) log = logging.getLogger('matchengine') if", "protocol_no, 'clinical_id': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})], protocol_no ) ) else:", "1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query, projection) ) return", "an existing matches_to_mark_available = [m for m in matches_by_sample_id[sample_id] if m['hash'] in disabled]", "matches_to_not_change_query, projection) ) return matches[0] async def get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes: list, protocol_no: str,", "'$nin': new_matches_hashes } } matches_to_disable_query = MongoQuery(query) projection = {\"hash\": 1, \"is_disabled\": 1}", "in the current run. 
if protocol_no in matchengine.clinical_run_log_entries: clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids", "matched on, not updating {matchengine.match_criteria_transform.trial_collection} matches\") if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() return", "= await get_existing_matches(matchengine, new_matches_hashes) existing_hashes = {result['hash'] for result in existing} disabled =", "\"\"\" Get matches in db which have the same hashes as newly found", "UpdateTask, MongoQuery from matchengine.internals.utilities.list_utils import chunk_list from matchengine.internals.utilities.utilities import perform_db_call logging.basicConfig(level=logging.INFO) log =", "= [m for m in matches_by_sample_id[sample_id] if m['hash'] in disabled] ops = get_update_operations(matches_to_disable,", "list, protocol_no: str, sample_id: str) -> list: \"\"\" Get matches to disable by", "protocol_no: :param sample_id: :return: \"\"\" query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id': sample_id, 'is_disabled':", ":param sample_id: :return: \"\"\" query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id': sample_id, 'is_disabled': False,", "matches in db with identical hashes to newly found matches existing = await", "asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query, projection) ) return matches[0] async def get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes:", "} matches_to_disable_query = MongoQuery(query) projection = {\"hash\": 1, \"is_disabled\": 1} matches = await", "hashes as newly found matches. 
:param matchengine: :param new_matches_hashes: :return: \"\"\" matches_to_not_change_query =", "logging from typing import TYPE_CHECKING from pymongo import UpdateMany, InsertOne from matchengine.internals.typing.matchengine_types import", "1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query, projection) ) return matches[0] async", "updated_time if protocol_no not in matchengine.matches or protocol_no not in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no}", "except ones matching current protocol_no\"\"\" # get clinical ids with matches clinical_ids =", "TYPE_CHECKING from pymongo import UpdateMany, InsertOne from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery from", "available_hashes = [trial_match['hash'] for trial_match in matches_to_mark_available] for chunk in chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash':", "matches in the db. Delete matches by adding {is_disabled: true} and insert all", "await get_matches_to_disable(matchengine, new_matches_hashes, protocol_no, sample_id) # flip is_disabled flag if a new match", "from current run from # previously run clinical ids for a specific protocol.", "MatchEngine) -> list: ops = list() updated_time = datetime.datetime.now() disable_hashes = [trial_match['hash'] for", "await matchengine.task_q.join() return log.info(f\"Updating matches for {protocol_no}\") if not matchengine.drop: # If no", "existing matches in the db. 
Delete matches by adding {is_disabled: true} and insert", "{ matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id': sample_id, 'is_disabled': False, 'hash': { '$nin': new_matches_hashes } }", "identical hashes to newly found matches existing = await get_existing_matches(matchengine, new_matches_hashes) existing_hashes =", "clinical_id in clinical_ids] } } projection = { '_id': 1, 'hash': 1, 'clinical_id':", "async def get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list) -> list: \"\"\" Get matches in db", "'_updated': updated_time}})) for to_insert in matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes = [trial_match['hash'] for trial_match in", "in matches_by_sample_id.values(): for match in matches: match['_updated'] = updated_time if protocol_no not in", "all new matches. \"\"\" matches_by_sample_id = matchengine.matches.get(protocol_no, dict()) updated_time = datetime.datetime.now() for matches", "looking for existing, enabled matches whose hashes are not present in newly generated", "collection=matchengine.trial_match_collection, query=MongoQuery(query), projection=projection) return [result for result in results] async def get_delete_ops(matches_to_disable: list,", "'_id': 1, 'hash': 1, 'clinical_id': 1 } results = await perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query),", "async def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) -> list: updated_time = datetime.datetime.now() hashes =", "trial_match in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() async def", "matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}}) projection = {\"hash\": 1, \"is_disabled\": 1} matches =", "\"\"\" Get matches to disable by looking 
for existing, enabled matches whose hashes", "1, 'clinical_id': 1 } results = await perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query), projection=projection) return [result", "[match['hash'] for match in matches_by_sample_id[sample_id]] # get existing matches in db with identical", "query=MongoQuery(query), projection=projection) return [result for result in results] async def get_delete_ops(matches_to_disable: list, matchengine:", "existing_hashes, sample_id) matches_to_disable = await get_matches_to_disable(matchengine, new_matches_hashes, protocol_no, sample_id) # flip is_disabled flag", "updated_time = datetime.datetime.now() for matches in matches_by_sample_id.values(): for match in matches: match['_updated'] =", "hash of an existing matches_to_mark_available = [m for m in matches_by_sample_id[sample_id] if m['hash']", "matches hash of an existing matches_to_mark_available = [m for m in matches_by_sample_id[sample_id] if", "asyncio import datetime import logging from typing import TYPE_CHECKING from pymongo import UpdateMany,", "} results = await perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query), projection=projection) return [result for result in", "= get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available, matchengine) else: ops = [InsertOne(document=trial_match) for trial_match in matches_by_sample_id[sample_id]]", "during current run. 
Done for every sample_id :param matchengine: :param new_matches_hashes: :param protocol_no:", "{'$in': chunk}}, update={'$set': {'is_disabled': True, '_updated': updated_time}})) for to_insert in matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes", "delete_ops = await get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for sample_id in matches_by_sample_id.keys(): if not", "clinical ids from current run from # previously run clinical ids for a", "everything else matches_to_insert = get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id) matches_to_disable = await get_matches_to_disable(matchengine, new_matches_hashes, protocol_no,", "def get_all_except(matchengine: MatchEngine, protocol_no: str, trial_matches_by_sample_id: dict) -> list: \"\"\"Return all matches except", "annotations import asyncio import datetime import logging from typing import TYPE_CHECKING from pymongo", "{'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})], protocol_no ) ) else: # Get", "[result for result in results] async def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) -> list:", "in matches_by_sample_id.keys(): if not matchengine.drop: new_matches_hashes = [match['hash'] for match in matches_by_sample_id[sample_id]] #", "are ids # which were run previously, but not in the current run.", "matches for {protocol_no}\") if not matchengine.drop: # If no matches are found, disable", "result['is_disabled']} # insert new matches if they don't already exist. 
disable everything else", "RunLogUpdateTask, UpdateTask, MongoQuery from matchengine.internals.utilities.list_utils import chunk_list from matchengine.internals.utilities.utilities import perform_db_call logging.basicConfig(level=logging.INFO) log", "matchengine.drop: new_matches_hashes = [match['hash'] for match in matches_by_sample_id[sample_id]] # get existing matches in", "new matches. \"\"\" matches_by_sample_id = matchengine.matches.get(protocol_no, dict()) updated_time = datetime.datetime.now() for matches in", "projection = {\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query,", "not present in newly generated matches during current run. Done for every sample_id", "in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched on, not updating {matchengine.match_criteria_transform.trial_collection} matches\") if", "MatchEngine async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str): \"\"\" Update trial matches by diff'ing", "clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()} # if protocol has been run", "# get existing matches in db with identical hashes to newly found matches", "trial_matches_by_sample_id: dict) -> list: \"\"\"Return all matches except ones matching current protocol_no\"\"\" #", "matchengine: MatchEngine) -> list: ops = list() updated_time = datetime.datetime.now() disable_hashes = [trial_match['hash']", "protocol_no: str): \"\"\" Update trial matches by diff'ing the newly created trial matches", "disabled = {result['hash'] for result in existing if result['is_disabled']} # insert new matches", "matches_to_insert: list, matches_to_mark_available: list, matchengine: MatchEngine) -> list: ops = list() updated_time =", "# flip is_disabled flag if a new match generated during run 
matches hash", "in matches_by_sample_id[sample_id]] # get existing matches in db with identical hashes to newly", "current run. Done for every sample_id :param matchengine: :param new_matches_hashes: :param protocol_no: :param", "UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})], protocol_no )", "of an existing matches_to_mark_available = [m for m in matches_by_sample_id[sample_id] if m['hash'] in", "with matches clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()} # if protocol has", "chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id': {'$in': chunk}}, update={'$set': {\"is_disabled\":", "get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) -> list: updated_time = datetime.datetime.now() hashes = [result['hash'] for", "true} and insert all new matches. \"\"\" matches_by_sample_id = matchengine.matches.get(protocol_no, dict()) updated_time =", "found matches. 
:param matchengine: :param new_matches_hashes: :return: \"\"\" matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}})", "MatchEngine, new_matches_hashes: list, protocol_no: str, sample_id: str) -> list: \"\"\" Get matches to", "if m['hash'] in disabled] ops = get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available, matchengine) else: ops =", "for to_insert in matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes = [trial_match['hash'] for trial_match in matches_to_mark_available] for", "str): \"\"\" Update trial matches by diff'ing the newly created trial matches against", "typing import TYPE_CHECKING from pymongo import UpdateMany, InsertOne from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask,", "= updated_time if protocol_no not in matchengine.matches or protocol_no not in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection}", "trial_matches_by_sample_id.keys()} # if protocol has been run previously, subtract clinical ids from current", "for sample_id in matches_by_sample_id.keys(): if not matchengine.drop: new_matches_hashes = [match['hash'] for match in", "sample_id: str) -> list: \"\"\" Get matches to disable by looking for existing,", "matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() async def get_all_except(matchengine: MatchEngine,", "get_matches_to_disable(matchengine, new_matches_hashes, protocol_no, sample_id) # flip is_disabled flag if a new match generated", "matches_to_disable = await get_matches_to_disable(matchengine, new_matches_hashes, protocol_no, sample_id) # flip is_disabled flag if a", "clinical_ids query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\": { '$in': 
[clinical_id for clinical_id in", "async def get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes: list, protocol_no: str, sample_id: str) -> list: \"\"\"", "generated matches during current run. Done for every sample_id :param matchengine: :param new_matches_hashes:", "records by sample id if not matchengine.matches[protocol_no]: for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait(", "subtract clinical ids from current run from # previously run clinical ids for", "for a specific protocol. The remainder are ids # which were run previously,", "if result['is_disabled']} # insert new matches if they don't already exist. disable everything", "match records by sample id if not matchengine.matches[protocol_no]: for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size):", "= { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id': sample_id, 'is_disabled': False, 'hash': { '$nin': new_matches_hashes }", "{protocol_no} was not matched on, not updating {matchengine.match_criteria_transform.trial_collection} matches\") if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no))", "get existing matches in db with identical hashes to newly found matches existing", "get_all_except(matchengine: MatchEngine, protocol_no: str, trial_matches_by_sample_id: dict) -> list: \"\"\"Return all matches except ones", "new_matches_hashes: :return: \"\"\" matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}}) projection = {\"hash\": 1, \"is_disabled\":", "match generated during run matches hash of an existing matches_to_mark_available = [m for", "remainder are ids # which were run previously, but not in the current", "results = await perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query), 
projection=projection) return [result for result in results]", "get_update_operations(matches_to_disable: list, matches_to_insert: list, matches_to_mark_available: list, matchengine: MatchEngine) -> list: ops = list()", "matchengine.task_q.join() return log.info(f\"Updating matches for {protocol_no}\") if not matchengine.drop: # If no matches", "matchengine.matches.get(protocol_no, dict()) updated_time = datetime.datetime.now() for matches in matches_by_sample_id.values(): for match in matches:", "matches_by_sample_id) delete_ops = await get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for sample_id in matches_by_sample_id.keys(): if", "clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\": { '$in':", "existing} disabled = {result['hash'] for result in existing if result['is_disabled']} # insert new", "existing, enabled matches whose hashes are not present in newly generated matches during", "MatchEngine, protocol_no: str): \"\"\" Update trial matches by diff'ing the newly created trial", "perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query), projection=projection) return [result for result in results] async def get_delete_ops(matches_to_disable:", "sample_id :param matchengine: :param new_matches_hashes: :param protocol_no: :param sample_id: :return: \"\"\" query =", "insert all new matches. 
\"\"\" matches_by_sample_id = matchengine.matches.get(protocol_no, dict()) updated_time = datetime.datetime.now() for", "new_matches_hashes: list) -> list: \"\"\" Get matches in db which have the same", "= list() for chunk in chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {\"is_disabled\": True,", "import MatchEngine async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str): \"\"\" Update trial matches by", "matches: match['_updated'] = updated_time if protocol_no not in matchengine.matches or protocol_no not in", "from __future__ import annotations import asyncio import datetime import logging from typing import", "matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for sample_id in matches_by_sample_id.keys(): if not matchengine.drop: new_matches_hashes = [match['hash'] for", "don't already exist. disable everything else matches_to_insert = get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id) matches_to_disable =", "matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})],", "matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query, projection) ) return matches[0] async def", "enabled matches whose hashes are not present in newly generated matches during current", "as newly found matches. 
:param matchengine: :param new_matches_hashes: :return: \"\"\" matches_to_not_change_query = MongoQuery({'hash':", ":param new_matches_hashes: :return: \"\"\" matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}}) projection = {\"hash\": 1,", "} } matches_to_disable_query = MongoQuery(query) projection = {\"hash\": 1, \"is_disabled\": 1} matches =", "trial matches by diff'ing the newly created trial matches against existing matches in", "matchengine.internals.utilities.list_utils import chunk_list from matchengine.internals.utilities.utilities import perform_db_call logging.basicConfig(level=logging.INFO) log = logging.getLogger('matchengine') if TYPE_CHECKING:", "diff'ing the newly created trial matches against existing matches in the db. Delete", "for match in matches_by_sample_id[sample_id]] # get existing matches in db with identical hashes", "updated_time}})) return ops def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set, sample_id: str) -> list: return", "matches_by_sample_id = matchengine.matches.get(protocol_no, dict()) updated_time = datetime.datetime.now() for matches in matches_by_sample_id.values(): for match", "protocol_no, 'sample_id': sample_id, 'is_disabled': False, 'hash': { '$nin': new_matches_hashes } } matches_to_disable_query =", "chunk in chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})) return", "for every sample_id :param matchengine: :param new_matches_hashes: :param protocol_no: :param sample_id: :return: \"\"\"", "in newly generated matches during current run. 
Done for every sample_id :param matchengine:", "datetime.datetime.now() for matches in matches_by_sample_id.values(): for match in matches: match['_updated'] = updated_time if", "async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str): \"\"\" Update trial matches by diff'ing the newly created", "no matches are found, disable all match records by sample id if not", "disable all match records by sample id if not matchengine.matches[protocol_no]: for chunk in", "matchengine.task_q.put_nowait( UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})], protocol_no", "matchengine.clinical_run_log_entries[protocol_no] - clinical_ids query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\": { '$in': [clinical_id for", "log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched on, not updating {matchengine.match_criteria_transform.trial_collection} matches\") if not matchengine.skip_run_log_entry:", "return matches[0] async def get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes: list, protocol_no: str, sample_id: str) ->", "from matchengine.internals.engine import MatchEngine async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str): \"\"\" Update trial", "in matchengine.matches or protocol_no not in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched on,", ":param protocol_no: :param sample_id: :return: \"\"\" query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id': sample_id,", "'hash': 1, 'clinical_id': 1 } results = await perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query), projection=projection) return", "log = 
logging.getLogger('matchengine') if TYPE_CHECKING: from matchengine.internals.engine import MatchEngine async def async_update_matches_by_protocol_no(matchengine: MatchEngine,", "in the db. Delete matches by adding {is_disabled: true} and insert all new", "protocol_no, matches_by_sample_id) delete_ops = await get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for sample_id in matches_by_sample_id.keys():", "matchengine.clinical_run_log_entries: clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\": {", "protocol has been run previously, subtract clinical ids from current run from #", ") ) else: # Get matches to disable and issue queries matches_to_disable =", "= {\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query, projection)", "get_existing_matches(matchengine, new_matches_hashes) existing_hashes = {result['hash'] for result in existing} disabled = {result['hash'] for", "from pymongo import UpdateMany, InsertOne from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery from matchengine.internals.utilities.list_utils", "ops = list() for chunk in chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {\"is_disabled\":", "ids from current run from # previously run clinical ids for a specific", "has been run previously, subtract clinical ids from current run from # previously", "newly found matches. 
:param matchengine: :param new_matches_hashes: :return: \"\"\" matches_to_not_change_query = MongoQuery({'hash': {'$in':", "for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id': {'$in': chunk}}, update={'$set':", "def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str): \"\"\" Update trial matches by diff'ing the newly", "been run previously, subtract clinical ids from current run from # previously run", "matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})) return ops async def", "for trial_match in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() async", "ops def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set, sample_id: str) -> list: return [m for", ":return: \"\"\" matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}}) projection = {\"hash\": 1, \"is_disabled\": 1}", "list: \"\"\"Return all matches except ones matching current protocol_no\"\"\" # get clinical ids", "same hashes as newly found matches. 
:param matchengine: :param new_matches_hashes: :return: \"\"\" matches_to_not_change_query", "matchengine.internals.engine import MatchEngine async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str): \"\"\" Update trial matches", "for clinical_id in clinical_ids] } } projection = { '_id': 1, 'hash': 1,", "every sample_id :param matchengine: :param new_matches_hashes: :param protocol_no: :param sample_id: :return: \"\"\" query", "\"\"\" query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id': sample_id, 'is_disabled': False, 'hash': { '$nin':", "matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() async def get_all_except(matchengine: MatchEngine, protocol_no: str, trial_matches_by_sample_id: dict) ->", "chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})) return ops async def get_existing_matches(matchengine: MatchEngine, new_matches_hashes:", "list, existing_hashes: set, sample_id: str) -> list: return [m for m in matches_by_sample_id[sample_id]", "matches_by_sample_id[sample_id]] # get existing matches in db with identical hashes to newly found", "all matches except ones matching current protocol_no\"\"\" # get clinical ids with matches", "matches whose hashes are not present in newly generated matches during current run.", "list() updated_time = datetime.datetime.now() disable_hashes = [trial_match['hash'] for trial_match in matches_to_disable] for chunk", "-> list: updated_time = datetime.datetime.now() hashes = [result['hash'] for result in matches_to_disable] ops", "for existing, enabled matches whose hashes are not present in newly generated matches", "protocol. The remainder are ids # which were run previously, but not in", "in trial_matches_by_sample_id.keys()} # if protocol has been run previously, subtract clinical ids from", "were run previously, but not in the current run. 
if protocol_no in matchengine.clinical_run_log_entries:", "list: ops = list() updated_time = datetime.datetime.now() disable_hashes = [trial_match['hash'] for trial_match in", "protocol_no)) if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() async def get_all_except(matchengine: MatchEngine, protocol_no: str,", "flip is_disabled flag if a new match generated during run matches hash of", "matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() return log.info(f\"Updating matches for {protocol_no}\") if not matchengine.drop: #", "return ops def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set, sample_id: str) -> list: return [m", "matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() return log.info(f\"Updating matches for {protocol_no}\") if not matchengine.drop: # If", "return [result for result in results] async def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) ->", "get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set, sample_id: str) -> list: return [m for m in", "the db. Delete matches by adding {is_disabled: true} and insert all new matches.", "matches existing = await get_existing_matches(matchengine, new_matches_hashes) existing_hashes = {result['hash'] for result in existing}", "new matches if they don't already exist. 
disable everything else matches_to_insert = get_matches_to_insert(matches_by_sample_id,", "hashes = [result['hash'] for result in matches_to_disable] ops = list() for chunk in", "to newly found matches existing = await get_existing_matches(matchengine, new_matches_hashes) existing_hashes = {result['hash'] for", "result in existing if result['is_disabled']} # insert new matches if they don't already", "in disabled] ops = get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available, matchengine) else: ops = [InsertOne(document=trial_match) for", "from typing import TYPE_CHECKING from pymongo import UpdateMany, InsertOne from matchengine.internals.typing.matchengine_types import RunLogUpdateTask,", "{\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query, projection) )", "protocol_no\"\"\" # get clinical ids with matches clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id in", "which have the same hashes as newly found matches. :param matchengine: :param new_matches_hashes:", "queries matches_to_disable = await get_all_except(matchengine, protocol_no, matches_by_sample_id) delete_ops = await get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops,", "# Get matches to disable and issue queries matches_to_disable = await get_all_except(matchengine, protocol_no,", "present in newly generated matches during current run. 
Done for every sample_id :param", "ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': True, '_updated': updated_time}})) for to_insert in matches_to_insert: ops.append(InsertOne(document=to_insert))", "If no matches are found, disable all match records by sample id if", "if not matchengine.matches[protocol_no]: for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id':", "matchengine.task_q.join() async def get_all_except(matchengine: MatchEngine, protocol_no: str, trial_matches_by_sample_id: dict) -> list: \"\"\"Return all", "'is_disabled': False, 'hash': { '$nin': new_matches_hashes } } matches_to_disable_query = MongoQuery(query) projection =", "pymongo import UpdateMany, InsertOne from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery from matchengine.internals.utilities.list_utils import", "# previously run clinical ids for a specific protocol. 
The remainder are ids", "[m for m in matches_by_sample_id[sample_id] if m['hash'] in disabled] ops = get_update_operations(matches_to_disable, matches_to_insert,", "-> list: ops = list() updated_time = datetime.datetime.now() disable_hashes = [trial_match['hash'] for trial_match", "datetime.datetime.now() hashes = [result['hash'] for result in matches_to_disable] ops = list() for chunk", "import annotations import asyncio import datetime import logging from typing import TYPE_CHECKING from", "protocol_no: str, sample_id: str) -> list: \"\"\" Get matches to disable by looking", "protocol_no not in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched on, not updating {matchengine.match_criteria_transform.trial_collection}", "= await get_all_except(matchengine, protocol_no, matches_by_sample_id) delete_ops = await get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for", "async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str): \"\"\" Update trial matches by diff'ing the", "matches. \"\"\" matches_by_sample_id = matchengine.matches.get(protocol_no, dict()) updated_time = datetime.datetime.now() for matches in matches_by_sample_id.values():", "def get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list) -> list: \"\"\" Get matches in db which", "str, sample_id: str) -> list: \"\"\" Get matches to disable by looking for", "ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})) return ops async def get_existing_matches(matchengine:", "current run. 
if protocol_no in matchengine.clinical_run_log_entries: clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids query =", "matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\": { '$in': [clinical_id for clinical_id in clinical_ids] } } projection", "import asyncio import datetime import logging from typing import TYPE_CHECKING from pymongo import", "[result['hash'] for result in matches_to_disable] ops = list() for chunk in chunk_list(hashes, matchengine.chunk_size):", "matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes = [trial_match['hash'] for trial_match in matches_to_mark_available] for chunk in chunk_list(available_hashes,", "matching current protocol_no\"\"\" # get clinical ids with matches clinical_ids = {matchengine.sample_mapping[sample_id] for", "Done for every sample_id :param matchengine: :param new_matches_hashes: :param protocol_no: :param sample_id: :return:", "# If no matches are found, disable all match records by sample id", "'hash': { '$nin': new_matches_hashes } } matches_to_disable_query = MongoQuery(query) projection = {\"hash\": 1,", "for sample_id in trial_matches_by_sample_id.keys()} # if protocol has been run previously, subtract clinical", "= await perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query), projection=projection) return [result for result in results] async", "matchengine.matches or protocol_no not in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched on, not", "{is_disabled: true} and insert all new matches. \"\"\" matches_by_sample_id = matchengine.matches.get(protocol_no, dict()) updated_time", "from # previously run clinical ids for a specific protocol. 
The remainder are", "for trial_match in matches_to_disable] for chunk in chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set':", "{result['hash'] for result in existing} disabled = {result['hash'] for result in existing if", "from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery from matchengine.internals.utilities.list_utils import chunk_list from matchengine.internals.utilities.utilities import", "for m in matches_by_sample_id[sample_id] if m['hash'] in disabled] ops = get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available,", "generated during run matches hash of an existing matches_to_mark_available = [m for m", "all match records by sample id if not matchengine.matches[protocol_no]: for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]),", "m in matches_by_sample_id[sample_id] if m['hash'] in disabled] ops = get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available, matchengine)", "sample_id) # flip is_disabled flag if a new match generated during run matches", "get clinical ids with matches clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()} #", "= MongoQuery({'hash': {'$in': new_matches_hashes}}) projection = {\"hash\": 1, \"is_disabled\": 1} matches = await", "are not present in newly generated matches during current run. 
Done for every", "trial_match in matches_to_disable] for chunk in chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled':", "in db with identical hashes to newly found matches existing = await get_existing_matches(matchengine,", "chunk}}, update={'$set': {'is_disabled': True, '_updated': updated_time}})) for to_insert in matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes =", "await get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for sample_id in matches_by_sample_id.keys(): if not matchengine.drop: new_matches_hashes", "whose hashes are not present in newly generated matches during current run. Done", "matchengine: :param new_matches_hashes: :param protocol_no: :param sample_id: :return: \"\"\" query = { matchengine.match_criteria_transform.match_trial_link_id:", "matches_to_mark_available] for chunk in chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': False, '_updated':", ") else: # Get matches to disable and issue queries matches_to_disable = await", "on, not updating {matchengine.match_criteria_transform.trial_collection} matches\") if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() return log.info(f\"Updating", "{protocol_no}\") if not matchengine.drop: # If no matches are found, disable all match", "[trial_match['hash'] for trial_match in matches_to_disable] for chunk in chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}},", "existing_hashes = {result['hash'] for result in existing} disabled = {result['hash'] for result in", "for result in existing if result['is_disabled']} # insert new matches if they don't", "ops async def get_existing_matches(matchengine: 
MatchEngine, new_matches_hashes: list) -> list: \"\"\" Get matches in", "adding {is_disabled: true} and insert all new matches. \"\"\" matches_by_sample_id = matchengine.matches.get(protocol_no, dict())", "matchengine: :param new_matches_hashes: :return: \"\"\" matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}}) projection = {\"hash\":", "disabled] ops = get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available, matchengine) else: ops = [InsertOne(document=trial_match) for trial_match", "and issue queries matches_to_disable = await get_all_except(matchengine, protocol_no, matches_by_sample_id) delete_ops = await get_delete_ops(matches_to_disable,", "= [result['hash'] for result in matches_to_disable] ops = list() for chunk in chunk_list(hashes,", "datetime import logging from typing import TYPE_CHECKING from pymongo import UpdateMany, InsertOne from", "sample_id in matches_by_sample_id.keys(): if not matchengine.drop: new_matches_hashes = [match['hash'] for match in matches_by_sample_id[sample_id]]", "new_matches_hashes: list, protocol_no: str, sample_id: str) -> list: \"\"\" Get matches to disable", "in existing} disabled = {result['hash'] for result in existing if result['is_disabled']} # insert", "current run from # previously run clinical ids for a specific protocol. 
The", "id if not matchengine.matches[protocol_no]: for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no,", "} } projection = { '_id': 1, 'hash': 1, 'clinical_id': 1 } results", "InsertOne from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery from matchengine.internals.utilities.list_utils import chunk_list from matchengine.internals.utilities.utilities", "asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query, projection) ) return matches[0] def get_update_operations(matches_to_disable: list, matches_to_insert: list,", "current protocol_no\"\"\" # get clinical ids with matches clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id", "matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id': sample_id, 'is_disabled': False, 'hash': { '$nin': new_matches_hashes } } matches_to_disable_query", "str) -> list: return [m for m in matches_by_sample_id[sample_id] if m['hash'] not in", "by looking for existing, enabled matches whose hashes are not present in newly", "= datetime.datetime.now() disable_hashes = [trial_match['hash'] for trial_match in matches_to_disable] for chunk in chunk_list(disable_hashes,", "else matches_to_insert = get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id) matches_to_disable = await get_matches_to_disable(matchengine, new_matches_hashes, protocol_no, sample_id)", "= matchengine.matches.get(protocol_no, dict()) updated_time = datetime.datetime.now() for matches in matches_by_sample_id.values(): for match in", "in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask( 
[UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id': {'$in': chunk}}, update={'$set': {\"is_disabled\": True,", "projection) ) return matches[0] async def get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes: list, protocol_no: str, sample_id:", "ops = get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available, matchengine) else: ops = [InsertOne(document=trial_match) for trial_match in", "matches_to_mark_available = [m for m in matches_by_sample_id[sample_id] if m['hash'] in disabled] ops =", "import chunk_list from matchengine.internals.utilities.utilities import perform_db_call logging.basicConfig(level=logging.INFO) log = logging.getLogger('matchengine') if TYPE_CHECKING: from", "MongoQuery(query) projection = {\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection,", "protocol_no)) for sample_id in matches_by_sample_id.keys(): if not matchengine.drop: new_matches_hashes = [match['hash'] for match", "for result in results] async def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) -> list: updated_time", "list: updated_time = datetime.datetime.now() hashes = [result['hash'] for result in matches_to_disable] ops =", "log.info(f\"Updating matches for {protocol_no}\") if not matchengine.drop: # If no matches are found,", "query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\": { '$in': [clinical_id for clinical_id in clinical_ids]", "found matches existing = await get_existing_matches(matchengine, new_matches_hashes) existing_hashes = {result['hash'] for result in", "= {matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()} # if protocol has been run previously,", "updated_time}})) return ops async def get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list) -> list: \"\"\" Get", "in 
matches_to_disable] for chunk in chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': True,", "\"\"\" matches_by_sample_id = matchengine.matches.get(protocol_no, dict()) updated_time = datetime.datetime.now() for matches in matches_by_sample_id.values(): for", "matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': False, '_updated': updated_time}})) return ops def get_matches_to_insert(matches_by_sample_id:", "created trial matches against existing matches in the db. Delete matches by adding", "matches_to_mark_available, matchengine) else: ops = [InsertOne(document=trial_match) for trial_match in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if", "run previously, subtract clinical ids from current run from # previously run clinical", "match in matches_by_sample_id[sample_id]] # get existing matches in db with identical hashes to", "{'is_disabled': False, '_updated': updated_time}})) return ops def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set, sample_id: str)", "get_all_except(matchengine, protocol_no, matches_by_sample_id) delete_ops = await get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for sample_id in", "are found, disable all match records by sample id if not matchengine.matches[protocol_no]: for", "new_matches_hashes = [match['hash'] for match in matches_by_sample_id[sample_id]] # get existing matches in db", "get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id) matches_to_disable = await get_matches_to_disable(matchengine, new_matches_hashes, protocol_no, sample_id) # flip is_disabled", "ids with matches clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()} # if protocol", "protocol_no not in 
matchengine.matches or protocol_no not in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not", "matches_to_disable] for chunk in chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': True, '_updated':", "list() for chunk in chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated':", "matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched on, not updating {matchengine.match_criteria_transform.trial_collection} matches\") if not", "run clinical ids for a specific protocol. The remainder are ids # which", "matches in db which have the same hashes as newly found matches. :param", "chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated':", "await get_existing_matches(matchengine, new_matches_hashes) existing_hashes = {result['hash'] for result in existing} disabled = {result['hash']", "not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() return log.info(f\"Updating matches for {protocol_no}\") if not matchengine.drop:", "matches clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()} # if protocol has been", "update={'$set': {'is_disabled': False, '_updated': updated_time}})) return ops def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set, sample_id:", "to disable by looking for existing, enabled matches whose hashes are not present", "-> list: 
\"\"\"Return all matches except ones matching current protocol_no\"\"\" # get clinical", "previously, subtract clinical ids from current run from # previously run clinical ids", "matches_to_disable_query, projection) ) return matches[0] def get_update_operations(matches_to_disable: list, matches_to_insert: list, matches_to_mark_available: list, matchengine:", "MatchEngine) -> list: updated_time = datetime.datetime.now() hashes = [result['hash'] for result in matches_to_disable]", "ids # which were run previously, but not in the current run. if", "query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id': sample_id, 'is_disabled': False, 'hash': { '$nin': new_matches_hashes", "Get matches in db which have the same hashes as newly found matches.", "flag if a new match generated during run matches hash of an existing", "= MongoQuery(query) projection = {\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine,", "ops = list() updated_time = datetime.datetime.now() disable_hashes = [trial_match['hash'] for trial_match in matches_to_disable]", "updated_time = datetime.datetime.now() disable_hashes = [trial_match['hash'] for trial_match in matches_to_disable] for chunk in", "else: # Get matches to disable and issue queries matches_to_disable = await get_all_except(matchengine,", "specific protocol. The remainder are ids # which were run previously, but not", "matches against existing matches in the db. 
Delete matches by adding {is_disabled: true}", "new_matches_hashes) existing_hashes = {result['hash'] for result in existing} disabled = {result['hash'] for result", ":param matchengine: :param new_matches_hashes: :return: \"\"\" matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}}) projection =", "chunk in chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': False, '_updated': updated_time}})) return", "for match in matches: match['_updated'] = updated_time if protocol_no not in matchengine.matches or", "get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for sample_id in matches_by_sample_id.keys(): if not matchengine.drop: new_matches_hashes =", "sample_id in trial_matches_by_sample_id.keys()} # if protocol has been run previously, subtract clinical ids", "-> list: \"\"\" Get matches in db which have the same hashes as", "against existing matches in the db. Delete matches by adding {is_disabled: true} and", "db. Delete matches by adding {is_disabled: true} and insert all new matches. \"\"\"", "existing matches_to_mark_available = [m for m in matches_by_sample_id[sample_id] if m['hash'] in disabled] ops", "with identical hashes to newly found matches existing = await get_existing_matches(matchengine, new_matches_hashes) existing_hashes", "newly created trial matches against existing matches in the db. Delete matches by", "exist. 
disable everything else matches_to_insert = get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id) matches_to_disable = await get_matches_to_disable(matchengine,", "# get clinical ids with matches clinical_ids = {matchengine.sample_mapping[sample_id] for sample_id in trial_matches_by_sample_id.keys()}", "= [trial_match['hash'] for trial_match in matches_to_mark_available] for chunk in chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in':", "a specific protocol. The remainder are ids # which were run previously, but", "'_updated': updated_time}})) return ops async def get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list) -> list: \"\"\"", "datetime.datetime.now() disable_hashes = [trial_match['hash'] for trial_match in matches_to_disable] for chunk in chunk_list(disable_hashes, matchengine.chunk_size):", "{ '$in': [clinical_id for clinical_id in clinical_ids] } } projection = { '_id':", "and insert all new matches. 
\"\"\" matches_by_sample_id = matchengine.matches.get(protocol_no, dict()) updated_time = datetime.datetime.now()", "not updating {matchengine.match_criteria_transform.trial_collection} matches\") if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() return log.info(f\"Updating matches", "updated_time}})) for to_insert in matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes = [trial_match['hash'] for trial_match in matches_to_mark_available]", "update={'$set': {\"is_disabled\": True, '_updated': updated_time}})) return ops async def get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list)", "import TYPE_CHECKING from pymongo import UpdateMany, InsertOne from matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery", "await matchengine.task_q.join() async def get_all_except(matchengine: MatchEngine, protocol_no: str, trial_matches_by_sample_id: dict) -> list: \"\"\"Return", "trial matches against existing matches in the db. Delete matches by adding {is_disabled:", "\"clinical_id\": { '$in': [clinical_id for clinical_id in clinical_ids] } } projection = {", "MatchEngine, new_matches_hashes: list) -> list: \"\"\" Get matches in db which have the", "matches_to_insert = get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id) matches_to_disable = await get_matches_to_disable(matchengine, new_matches_hashes, protocol_no, sample_id) #", "= { '_id': 1, 'hash': 1, 'clinical_id': 1 } results = await perform_db_call(matchengine,", "# insert new matches if they don't already exist. 
disable everything else matches_to_insert", "matches_to_disable = await get_all_except(matchengine, protocol_no, matches_by_sample_id) delete_ops = await get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no))", "trial_match in matches_to_mark_available] for chunk in chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled':", "__future__ import annotations import asyncio import datetime import logging from typing import TYPE_CHECKING", "= datetime.datetime.now() hashes = [result['hash'] for result in matches_to_disable] ops = list() for", "protocol_no in matchengine.clinical_run_log_entries: clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no,", "in matchengine.clinical_run_log_entries: clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\":", "def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set, sample_id: str) -> list: return [m for m", "\"\"\" Update trial matches by diff'ing the newly created trial matches against existing", "if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() return log.info(f\"Updating matches for {protocol_no}\") if not", "= matchengine.clinical_run_log_entries[protocol_no] - clinical_ids query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\": { '$in': [clinical_id", "{\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query, projection) )", "for result in existing} disabled = {result['hash'] for result in existing if 
result['is_disabled']}", "matches if they don't already exist. disable everything else matches_to_insert = get_matches_to_insert(matches_by_sample_id, existing_hashes,", "hashes to newly found matches existing = await get_existing_matches(matchengine, new_matches_hashes) existing_hashes = {result['hash']", "disable everything else matches_to_insert = get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id) matches_to_disable = await get_matches_to_disable(matchengine, new_matches_hashes,", "= list() updated_time = datetime.datetime.now() disable_hashes = [trial_match['hash'] for trial_match in matches_to_disable] for", "list, matchengine: MatchEngine) -> list: updated_time = datetime.datetime.now() hashes = [result['hash'] for result", "logging.basicConfig(level=logging.INFO) log = logging.getLogger('matchengine') if TYPE_CHECKING: from matchengine.internals.engine import MatchEngine async def async_update_matches_by_protocol_no(matchengine:", "newly generated matches during current run. Done for every sample_id :param matchengine: :param", "\"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query, projection) ) return matches[0]", "if they don't already exist. disable everything else matches_to_insert = get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id)", "if not matchengine.drop: # If no matches are found, disable all match records", "ids for a specific protocol. 
The remainder are ids # which were run", "sample id if not matchengine.matches[protocol_no]: for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id:", "matches_to_insert, matches_to_mark_available, matchengine) else: ops = [InsertOne(document=trial_match) for trial_match in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no))", "# which were run previously, but not in the current run. if protocol_no", "from matchengine.internals.utilities.utilities import perform_db_call logging.basicConfig(level=logging.INFO) log = logging.getLogger('matchengine') if TYPE_CHECKING: from matchengine.internals.engine import", "def get_update_operations(matches_to_disable: list, matches_to_insert: list, matches_to_mark_available: list, matchengine: MatchEngine) -> list: ops =", "matches_to_disable] ops = list() for chunk in chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set':", "{ '_id': 1, 'hash': 1, 'clinical_id': 1 } results = await perform_db_call(matchengine, collection=matchengine.trial_match_collection,", "import RunLogUpdateTask, UpdateTask, MongoQuery from matchengine.internals.utilities.list_utils import chunk_list from matchengine.internals.utilities.utilities import perform_db_call logging.basicConfig(level=logging.INFO)", "for chunk in chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}}))", "in matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes = [trial_match['hash'] for trial_match in matches_to_mark_available] for chunk in", "matches_by_sample_id.keys(): if not matchengine.drop: new_matches_hashes = [match['hash'] for match in 
matches_by_sample_id[sample_id]] # get", "in chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})) return ops", "matches. :param matchengine: :param new_matches_hashes: :return: \"\"\" matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}}) projection", "list: \"\"\" Get matches to disable by looking for existing, enabled matches whose", "set, sample_id: str) -> list: return [m for m in matches_by_sample_id[sample_id] if m['hash']", "sample_id) matches_to_disable = await get_matches_to_disable(matchengine, new_matches_hashes, protocol_no, sample_id) # flip is_disabled flag if", "async def get_all_except(matchengine: MatchEngine, protocol_no: str, trial_matches_by_sample_id: dict) -> list: \"\"\"Return all matches", "matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query, projection) ) return matches[0] def get_update_operations(matches_to_disable:", "not matchengine.drop: new_matches_hashes = [match['hash'] for match in matches_by_sample_id[sample_id]] # get existing matches", "1, 'hash': 1, 'clinical_id': 1 } results = await perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query), projection=projection)", "chunk in chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': True, '_updated': updated_time}})) for", "sample_id: :return: \"\"\" query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id': sample_id, 'is_disabled': False, 'hash':", "in matches_by_sample_id[sample_id] if m['hash'] in disabled] ops = get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available, matchengine) else:", "matches\") if not matchengine.skip_run_log_entry: 
matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() return log.info(f\"Updating matches for {protocol_no}\") if", "\"\"\" matches_to_not_change_query = MongoQuery({'hash': {'$in': new_matches_hashes}}) projection = {\"hash\": 1, \"is_disabled\": 1} matches", "await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query, projection) ) return matches[0] def get_update_operations(matches_to_disable: list, matches_to_insert:", "new_matches_hashes}}) projection = {\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection,", "existing_hashes: set, sample_id: str) -> list: return [m for m in matches_by_sample_id[sample_id] if", "not in matchengine.matches or protocol_no not in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched", "existing = await get_existing_matches(matchengine, new_matches_hashes) existing_hashes = {result['hash'] for result in existing} disabled", "chunk}}, update={'$set': {'is_disabled': False, '_updated': updated_time}})) return ops def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set,", "return matches[0] def get_update_operations(matches_to_disable: list, matches_to_insert: list, matches_to_mark_available: list, matchengine: MatchEngine) -> list:", "matches_by_sample_id.values(): for match in matches: match['_updated'] = updated_time if protocol_no not in matchengine.matches", "list: \"\"\" Get matches in db which have the same hashes as newly", "m['hash'] in disabled] ops = get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available, matchengine) else: ops = [InsertOne(document=trial_match)", "result in existing} disabled = {result['hash'] for result in existing if result['is_disabled']} #", "MatchEngine, protocol_no: str, trial_matches_by_sample_id: dict) -> 
list: \"\"\"Return all matches except ones matching", "previously run clinical ids for a specific protocol. The remainder are ids #", "matches_by_sample_id[sample_id] if m['hash'] in disabled] ops = get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available, matchengine) else: ops", "logging.getLogger('matchengine') if TYPE_CHECKING: from matchengine.internals.engine import MatchEngine async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str):", "1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query, projection) ) return", "if TYPE_CHECKING: from matchengine.internals.engine import MatchEngine async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str): \"\"\"", "True, '_updated': updated_time}})) for to_insert in matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes = [trial_match['hash'] for trial_match", "{ '$nin': new_matches_hashes } } matches_to_disable_query = MongoQuery(query) projection = {\"hash\": 1, \"is_disabled\":", "[trial_match['hash'] for trial_match in matches_to_mark_available] for chunk in chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}},", "protocol_no: str, trial_matches_by_sample_id: dict) -> list: \"\"\"Return all matches except ones matching current", ":return: \"\"\" query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id': sample_id, 'is_disabled': False, 'hash': {", ") return matches[0] def get_update_operations(matches_to_disable: list, matches_to_insert: list, matches_to_mark_available: list, matchengine: MatchEngine) ->", "for trial_match in matches_to_mark_available] for chunk in chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set':", "run. 
Done for every sample_id :param matchengine: :param new_matches_hashes: :param protocol_no: :param sample_id:", "for chunk in chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': True, '_updated': updated_time}}))", "= {\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query, projection)", "await get_all_except(matchengine, protocol_no, matches_by_sample_id) delete_ops = await get_delete_ops(matches_to_disable, matchengine) matchengine.task_q.put_nowait(UpdateTask(delete_ops, protocol_no)) for sample_id", "not matchengine.matches[protocol_no]: for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask( [UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id': {'$in':", "await perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query), projection=projection) return [result for result in results] async def", "str, trial_matches_by_sample_id: dict) -> list: \"\"\"Return all matches except ones matching current protocol_no\"\"\"", "by sample id if not matchengine.matches[protocol_no]: for chunk in chunk_list(list(matchengine.clinical_ids_for_protocol_cache[protocol_no]), matchengine.chunk_size): matchengine.task_q.put_nowait( UpdateTask(", "matches in matches_by_sample_id.values(): for match in matches: match['_updated'] = updated_time if protocol_no not", "= {result['hash'] for result in existing if result['is_disabled']} # insert new matches if", "previously, but not in the current run. 
if protocol_no in matchengine.clinical_run_log_entries: clinical_ids =", "# if protocol has been run previously, subtract clinical ids from current run", "def get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes: list, protocol_no: str, sample_id: str) -> list: \"\"\" Get", "Get matches to disable by looking for existing, enabled matches whose hashes are", "Delete matches by adding {is_disabled: true} and insert all new matches. \"\"\" matches_by_sample_id", "import datetime import logging from typing import TYPE_CHECKING from pymongo import UpdateMany, InsertOne", "get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes: list, protocol_no: str, sample_id: str) -> list: \"\"\" Get matches", "by diff'ing the newly created trial matches against existing matches in the db.", "matches[0] async def get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes: list, protocol_no: str, sample_id: str) -> list:", "matchengine.internals.utilities.utilities import perform_db_call logging.basicConfig(level=logging.INFO) log = logging.getLogger('matchengine') if TYPE_CHECKING: from matchengine.internals.engine import MatchEngine", "matches except ones matching current protocol_no\"\"\" # get clinical ids with matches clinical_ids", "list, matches_to_insert: list, matches_to_mark_available: list, matchengine: MatchEngine) -> list: ops = list() updated_time", "= { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\": { '$in': [clinical_id for clinical_id in clinical_ids] }", "{result['hash'] for result in existing if result['is_disabled']} # insert new matches if they", "result in results] async def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) -> list: updated_time =", "matches[0] def get_update_operations(matches_to_disable: list, matches_to_insert: list, matches_to_mark_available: list, matchengine: MatchEngine) -> list: ops", "match['_updated'] = updated_time if protocol_no not in 
matchengine.matches or protocol_no not in matchengine._trials_to_match_on:", "updated_time}})], protocol_no ) ) else: # Get matches to disable and issue queries", "already exist. disable everything else matches_to_insert = get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id) matches_to_disable = await", "they don't already exist. disable everything else matches_to_insert = get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id) matches_to_disable", "if not matchengine.drop: new_matches_hashes = [match['hash'] for match in matches_by_sample_id[sample_id]] # get existing", "new_matches_hashes } } matches_to_disable_query = MongoQuery(query) projection = {\"hash\": 1, \"is_disabled\": 1} matches", "= [match['hash'] for match in matches_by_sample_id[sample_id]] # get existing matches in db with", "MongoQuery({'hash': {'$in': new_matches_hashes}}) projection = {\"hash\": 1, \"is_disabled\": 1} matches = await asyncio.gather(", "get_update_operations(matches_to_disable, matches_to_insert, matches_to_mark_available, matchengine) else: ops = [InsertOne(document=trial_match) for trial_match in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops,", "{ matchengine.match_criteria_transform.match_trial_link_id: protocol_no, \"clinical_id\": { '$in': [clinical_id for clinical_id in clinical_ids] } }", "False, '_updated': updated_time}})) return ops def get_matches_to_insert(matches_by_sample_id: list, existing_hashes: set, sample_id: str) ->", "update={'$set': {'is_disabled': True, '_updated': updated_time}})) for to_insert in matches_to_insert: ops.append(InsertOne(document=to_insert)) available_hashes = [trial_match['hash']", "newly found matches existing = await get_existing_matches(matchengine, new_matches_hashes) existing_hashes = {result['hash'] for result", "perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query, projection) ) return matches[0] async def 
get_matches_to_disable(matchengine: MatchEngine, new_matches_hashes: list,", "in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() async def get_all_except(matchengine:", "results] async def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) -> list: updated_time = datetime.datetime.now() hashes", "def get_delete_ops(matches_to_disable: list, matchengine: MatchEngine) -> list: updated_time = datetime.datetime.now() hashes = [result['hash']", "The remainder are ids # which were run previously, but not in the", ":param matchengine: :param new_matches_hashes: :param protocol_no: :param sample_id: :return: \"\"\" query = {", "[UpdateMany(filter={matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'clinical_id': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})], protocol_no ) )", "found, disable all match records by sample id if not matchengine.matches[protocol_no]: for chunk", "matchengine: MatchEngine) -> list: updated_time = datetime.datetime.now() hashes = [result['hash'] for result in", "if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() async def get_all_except(matchengine: MatchEngine, protocol_no: str, trial_matches_by_sample_id:", "disable and issue queries matches_to_disable = await get_all_except(matchengine, protocol_no, matches_by_sample_id) delete_ops = await", "update={'$set': {\"is_disabled\": True, '_updated': updated_time}})], protocol_no ) ) else: # Get matches to", "{matchengine.match_criteria_transform.trial_collection} matches\") if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() return log.info(f\"Updating matches for {protocol_no}\")", "not 
matchengine.drop: # If no matches are found, disable all match records by", "= await asyncio.gather( perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_not_change_query, projection) ) return matches[0] async def get_matches_to_disable(matchengine:", "or protocol_no not in matchengine._trials_to_match_on: log.info(f\"{matchengine.match_criteria_transform.trial_collection} {protocol_no} was not matched on, not updating", "matches_to_mark_available: list, matchengine: MatchEngine) -> list: ops = list() updated_time = datetime.datetime.now() disable_hashes", "is_disabled flag if a new match generated during run matches hash of an", "clinical_ids] } } projection = { '_id': 1, 'hash': 1, 'clinical_id': 1 }", "but not in the current run. if protocol_no in matchengine.clinical_run_log_entries: clinical_ids = matchengine.clinical_run_log_entries[protocol_no]", "return ops async def get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list) -> list: \"\"\" Get matches", "-> list: return [m for m in matches_by_sample_id[sample_id] if m['hash'] not in existing_hashes]", "{\"is_disabled\": True, '_updated': updated_time}})], protocol_no ) ) else: # Get matches to disable", "str) -> list: \"\"\" Get matches to disable by looking for existing, enabled", "= get_matches_to_insert(matches_by_sample_id, existing_hashes, sample_id) matches_to_disable = await get_matches_to_disable(matchengine, new_matches_hashes, protocol_no, sample_id) # flip", "a new match generated during run matches hash of an existing matches_to_mark_available =", "for result in matches_to_disable] ops = list() for chunk in chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash':", "the current run. if protocol_no in matchengine.clinical_run_log_entries: clinical_ids = matchengine.clinical_run_log_entries[protocol_no] - clinical_ids query", "clinical ids for a specific protocol. 
The remainder are ids # which were", "new_matches_hashes: :param protocol_no: :param sample_id: :return: \"\"\" query = { matchengine.match_criteria_transform.match_trial_link_id: protocol_no, 'sample_id':", "= [trial_match['hash'] for trial_match in matches_to_disable] for chunk in chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in':", "list, matches_to_mark_available: list, matchengine: MatchEngine) -> list: ops = list() updated_time = datetime.datetime.now()", "in matches_to_mark_available] for chunk in chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': False,", "} projection = { '_id': 1, 'hash': 1, 'clinical_id': 1 } results =", "projection) ) return matches[0] def get_update_operations(matches_to_disable: list, matches_to_insert: list, matches_to_mark_available: list, matchengine: MatchEngine)", "in matches: match['_updated'] = updated_time if protocol_no not in matchengine.matches or protocol_no not", "True, '_updated': updated_time}})) return ops async def get_existing_matches(matchengine: MatchEngine, new_matches_hashes: list) -> list:", "was not matched on, not updating {matchengine.match_criteria_transform.trial_collection} matches\") if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await", "disable_hashes = [trial_match['hash'] for trial_match in matches_to_disable] for chunk in chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash':", "new_matches_hashes, protocol_no, sample_id) # flip is_disabled flag if a new match generated during", "matches by diff'ing the newly created trial matches against existing matches in the", "perform_db_call(matchengine, matchengine.trial_match_collection, matches_to_disable_query, projection) ) return matches[0] def get_update_operations(matches_to_disable: list, matches_to_insert: list, 
matches_to_mark_available:", "in chunk_list(disable_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': True, '_updated': updated_time}})) for to_insert", "hashes are not present in newly generated matches during current run. Done for", "= [InsertOne(document=trial_match) for trial_match in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await", "which were run previously, but not in the current run. if protocol_no in", "TYPE_CHECKING: from matchengine.internals.engine import MatchEngine async def async_update_matches_by_protocol_no(matchengine: MatchEngine, protocol_no: str): \"\"\" Update", "= datetime.datetime.now() for matches in matches_by_sample_id.values(): for match in matches: match['_updated'] = updated_time", "run from # previously run clinical ids for a specific protocol. 
The remainder", "matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join() async def get_all_except(matchengine: MatchEngine, protocol_no: str, trial_matches_by_sample_id: dict) -> list:", "in chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': False, '_updated': updated_time}})) return ops", "[InsertOne(document=trial_match) for trial_match in matches_by_sample_id[sample_id]] matchengine.task_q.put_nowait(UpdateTask(ops, protocol_no)) if not matchengine.skip_run_log_entry: matchengine.task_q.put_nowait(RunLogUpdateTask(protocol_no)) await matchengine.task_q.join()", "1 } results = await perform_db_call(matchengine, collection=matchengine.trial_match_collection, query=MongoQuery(query), projection=projection) return [result for result", "chunk_list(hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {\"is_disabled\": True, '_updated': updated_time}})) return ops async", "sample_id, 'is_disabled': False, 'hash': { '$nin': new_matches_hashes } } matches_to_disable_query = MongoQuery(query) projection", "matches by adding {is_disabled: true} and insert all new matches. \"\"\" matches_by_sample_id =", "matchengine.internals.typing.matchengine_types import RunLogUpdateTask, UpdateTask, MongoQuery from matchengine.internals.utilities.list_utils import chunk_list from matchengine.internals.utilities.utilities import perform_db_call", "for chunk in chunk_list(available_hashes, matchengine.chunk_size): ops.append(UpdateMany(filter={'hash': {'$in': chunk}}, update={'$set': {'is_disabled': False, '_updated': updated_time}}))", "perform_db_call logging.basicConfig(level=logging.INFO) log = logging.getLogger('matchengine') if TYPE_CHECKING: from matchengine.internals.engine import MatchEngine async def" ]
[ "def test_makeload (): aquests.configure (CONCURRENT, callback = makeload) # cioncurrent for i in", "callback = makeload) # cioncurrent for i in range (CONCURRENT): aquests.get (\"http://127.0.0.1:5000/\", meta", "= 50 MAX_REQ = 1000 _ID = 0 def makeload (response): global _ID", "= 0 def makeload (response): global _ID print (response.meta ['_id'], response.code, response.msg, response.version)", "_ID = 0 def makeload (response): global _ID print (response.meta ['_id'], response.code, response.msg,", "= makeload) # cioncurrent for i in range (CONCURRENT): aquests.get (\"http://127.0.0.1:5000/\", meta =", "_ID += 1 def test_makeload (): aquests.configure (CONCURRENT, callback = makeload) # cioncurrent", "['_id'], response.code, response.msg, response.version) if aquests.countreq () < MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\", meta =", "response.version) if aquests.countreq () < MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID", "0 def makeload (response): global _ID print (response.meta ['_id'], response.code, response.msg, response.version) if", "print (response.meta ['_id'], response.code, response.msg, response.version) if aquests.countreq () < MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\",", "response.code, response.msg, response.version) if aquests.countreq () < MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id':", "< MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID += 1 def test_makeload", "range (CONCURRENT): aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID += 1 aquests.fetchall ()", "50 MAX_REQ = 1000 _ID = 0 def makeload (response): global _ID print", "() < MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID += 1 def", "(response.meta ['_id'], response.code, response.msg, response.version) if aquests.countreq () < MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\", meta", "global _ID print (response.meta ['_id'], response.code, response.msg, 
response.version) if aquests.countreq () < MAX_REQ:", "def makeload (response): global _ID print (response.meta ['_id'], response.code, response.msg, response.version) if aquests.countreq", "= 1000 _ID = 0 def makeload (response): global _ID print (response.meta ['_id'],", "aquests.configure (CONCURRENT, callback = makeload) # cioncurrent for i in range (CONCURRENT): aquests.get", "MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID += 1 def test_makeload ():", "+= 1 def test_makeload (): aquests.configure (CONCURRENT, callback = makeload) # cioncurrent for", "response.msg, response.version) if aquests.countreq () < MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID})", "(response): global _ID print (response.meta ['_id'], response.code, response.msg, response.version) if aquests.countreq () <", "# cioncurrent for i in range (CONCURRENT): aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID})", "aquests.countreq () < MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID += 1", "aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID += 1 def test_makeload (): aquests.configure", "makeload (response): global _ID print (response.meta ['_id'], response.code, response.msg, response.version) if aquests.countreq ()", "test_makeload (): aquests.configure (CONCURRENT, callback = makeload) # cioncurrent for i in range", "(CONCURRENT, callback = makeload) # cioncurrent for i in range (CONCURRENT): aquests.get (\"http://127.0.0.1:5000/\",", "cioncurrent for i in range (CONCURRENT): aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID", "(\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID += 1 def test_makeload (): aquests.configure (CONCURRENT,", "i in range (CONCURRENT): aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID += 1", "MAX_REQ = 1000 _ID = 0 def makeload (response): global _ID print (response.meta", "makeload) # cioncurrent for i in range (CONCURRENT): 
aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id':", "= {'_id': _ID}) _ID += 1 def test_makeload (): aquests.configure (CONCURRENT, callback =", "for i in range (CONCURRENT): aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID +=", "(): aquests.configure (CONCURRENT, callback = makeload) # cioncurrent for i in range (CONCURRENT):", "_ID}) _ID += 1 def test_makeload (): aquests.configure (CONCURRENT, callback = makeload) #", "aquests CONCURRENT = 50 MAX_REQ = 1000 _ID = 0 def makeload (response):", "meta = {'_id': _ID}) _ID += 1 def test_makeload (): aquests.configure (CONCURRENT, callback", "1000 _ID = 0 def makeload (response): global _ID print (response.meta ['_id'], response.code,", "{'_id': _ID}) _ID += 1 def test_makeload (): aquests.configure (CONCURRENT, callback = makeload)", "import aquests CONCURRENT = 50 MAX_REQ = 1000 _ID = 0 def makeload", "CONCURRENT = 50 MAX_REQ = 1000 _ID = 0 def makeload (response): global", "_ID print (response.meta ['_id'], response.code, response.msg, response.version) if aquests.countreq () < MAX_REQ: aquests.get", "if aquests.countreq () < MAX_REQ: aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID +=", "1 def test_makeload (): aquests.configure (CONCURRENT, callback = makeload) # cioncurrent for i", "in range (CONCURRENT): aquests.get (\"http://127.0.0.1:5000/\", meta = {'_id': _ID}) _ID += 1 aquests.fetchall" ]
[ "- 1, -1 , -1): result += char return result def reverse_string_recursive(string): if", "''' Reverse a string ''' def reverse_string_iterative(string): result = '' for char in", "''' def reverse_string_iterative(string): result = '' for char in range(len(string) - 1, -1", "+= char return result def reverse_string_recursive(string): if string: return reverse_string_recursive(string[1:]) + string[0] return", "-1): result += char return result def reverse_string_recursive(string): if string: return reverse_string_recursive(string[1:]) +", ", -1): result += char return result def reverse_string_recursive(string): if string: return reverse_string_recursive(string[1:])", "result = '' for char in range(len(string) - 1, -1 , -1): result", "def reverse_string_recursive(string): if string: return reverse_string_recursive(string[1:]) + string[0] return '' def reverse_string_pythonic(string): return", "return result def reverse_string_recursive(string): if string: return reverse_string_recursive(string[1:]) + string[0] return '' def", "result += char return result def reverse_string_recursive(string): if string: return reverse_string_recursive(string[1:]) + string[0]", "'' for char in range(len(string) - 1, -1 , -1): result += char", "def reverse_string_iterative(string): result = '' for char in range(len(string) - 1, -1 ,", "string ''' def reverse_string_iterative(string): result = '' for char in range(len(string) - 1,", "range(len(string) - 1, -1 , -1): result += char return result def reverse_string_recursive(string):", "char in range(len(string) - 1, -1 , -1): result += char return result", "-1 , -1): result += char return result def reverse_string_recursive(string): if string: return", "for char in range(len(string) - 1, -1 , -1): result += char return", "a string ''' def reverse_string_iterative(string): result = '' for char in range(len(string) -", "char return result def reverse_string_recursive(string): if string: return reverse_string_recursive(string[1:]) + 
string[0] return ''", "reverse_string_iterative(string): result = '' for char in range(len(string) - 1, -1 , -1):", "= '' for char in range(len(string) - 1, -1 , -1): result +=", "Reverse a string ''' def reverse_string_iterative(string): result = '' for char in range(len(string)", "reverse_string_recursive(string): if string: return reverse_string_recursive(string[1:]) + string[0] return '' def reverse_string_pythonic(string): return string[::-1]", "result def reverse_string_recursive(string): if string: return reverse_string_recursive(string[1:]) + string[0] return '' def reverse_string_pythonic(string):", "in range(len(string) - 1, -1 , -1): result += char return result def", "1, -1 , -1): result += char return result def reverse_string_recursive(string): if string:" ]
[ "def redo(self,*args, **kwargs): \"\"\" Redoes 0'th redo. \"\"\" return None def disable(self,*args, **kwargs):", "def end(self,*args, **kwargs): \"\"\" Complete current undo set and add it to the", "undo set and add it to the undo list. \"\"\" return None def", "None def new(self,*args, **kwargs): \"\"\" Same as end();begin(). \"\"\" return None def cancel(self,*args,", "return None def begin(self,*args, **kwargs): \"\"\" Begin a new user-visible group of undo", "\"\"\" Number of undo's that can be done. \"\"\" return None def redoSize(self,*args,", "redo's that can be done. \"\"\" return None def undoTruncate(self,*args, **kwargs): \"\"\" Destroy", "enable(self,*args, **kwargs): \"\"\" Undoes the previous disable() \"\"\" return None def disabled(self,*args, **kwargs):", "done. \"\"\" return None def redoSize(self,*args, **kwargs): \"\"\" Number of redo's that can", "\"\"\" Undoes any actions recorded in the current set and throws it away.", "disable(self,*args, **kwargs): \"\"\" Prevent recording undos until matching enable() \"\"\" return None def", "end();begin(). \"\"\" return None def cancel(self,*args, **kwargs): \"\"\" Undoes any actions recorded in", "redoTruncate(self,*args, **kwargs): \"\"\" Destroy any redo's greater or equal to n. \"\"\" return", "**kwargs): \"\"\" Undoes any actions recorded in the current set and throws it", "of redo's that can be done. \"\"\" return None def undoTruncate(self,*args, **kwargs): \"\"\"", "Return short description of redo n. \"\"\" return None def undoDescribeFully(self,*args, **kwargs): \"\"\"", "None def undoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of undo n. \"\"\" return", "throws it away. \"\"\" return None def undoSize(self,*args, **kwargs): \"\"\" Number of undo's", "None def __new__(self,*args, **kwargs): \"\"\" Create and return a new object. See help(type)", "to the undo list. 
\"\"\" return None def new(self,*args, **kwargs): \"\"\" Same as", "\"\"\" return None def name(self,*args, **kwargs): \"\"\" Name current undo set. \"\"\" return", "None def name(self,*args, **kwargs): \"\"\" Name current undo set. \"\"\" return None def", "return None def __init__(self, *args, **kwargs): \"\"\" Initialize self. See help(type(self)) for accurate", "a new user-visible group of undo actions. \"\"\" return None def name(self,*args, **kwargs):", "**kwargs): \"\"\" Number of undo's that can be done. \"\"\" return None def", "\"\"\" Return short description of redo n. \"\"\" return None def undoDescribeFully(self,*args, **kwargs):", "\"\"\" Same as end();begin(). \"\"\" return None def cancel(self,*args, **kwargs): \"\"\" Undoes any", "**kwargs): \"\"\" Destroy any undo's greater or equal to n. \"\"\" return None", "Destroy any redo's greater or equal to n. \"\"\" return None def undoDescribe(self,*args,", "done. \"\"\" return None def undoTruncate(self,*args, **kwargs): \"\"\" Destroy any undo's greater or", "Return long description of redo n. \"\"\" return None def undo(self,*args, **kwargs): \"\"\"", "None def disabled(self,*args, **kwargs): \"\"\" True if disable() has been called \"\"\" return", "redoDescribe(self,*args, **kwargs): \"\"\" Return short description of redo n. \"\"\" return None def", "\"\"\" \"\"\" return None def __exit__(self,*args, **kwargs): \"\"\" \"\"\" return None def __init__(self,", "def __new__(self,*args, **kwargs): \"\"\" Create and return a new object. See help(type) for", "def redoSize(self,*args, **kwargs): \"\"\" Number of redo's that can be done. \"\"\" return", ". import * class Undo(object): \"\"\" Undo \"\"\" def __hash__(self, ): \"\"\" Return", "Same as end();begin(). \"\"\" return None def cancel(self,*args, **kwargs): \"\"\" Undoes any actions", "\"\"\" return None def cancel(self,*args, **kwargs): \"\"\" Undoes any actions recorded in the", "equal to n. 
\"\"\" return None def undoDescribe(self,*args, **kwargs): \"\"\" Return short description", "undo n. \"\"\" return None def redoDescribe(self,*args, **kwargs): \"\"\" Return short description of", "\"\"\" True if disable() has been called \"\"\" return None def __enter__(self,*args, **kwargs):", "\"\"\" return None def __exit__(self,*args, **kwargs): \"\"\" \"\"\" return None def __init__(self, *args,", "Number of redo's that can be done. \"\"\" return None def undoTruncate(self,*args, **kwargs):", "and throws it away. \"\"\" return None def undoSize(self,*args, **kwargs): \"\"\" Number of", "\"\"\" def __hash__(self, ): \"\"\" Return hash(self). \"\"\" return None def __new__(self,*args, **kwargs):", "**kwargs): \"\"\" Destroy any redo's greater or equal to n. \"\"\" return None", "Undo \"\"\" def __hash__(self, ): \"\"\" Return hash(self). \"\"\" return None def __new__(self,*args,", "**kwargs): \"\"\" Return short description of undo n. \"\"\" return None def redoDescribe(self,*args,", "def cancel(self,*args, **kwargs): \"\"\" Undoes any actions recorded in the current set and", "if disable() has been called \"\"\" return None def __enter__(self,*args, **kwargs): \"\"\" \"\"\"", "\"\"\" return None def begin(self,*args, **kwargs): \"\"\" Begin a new user-visible group of", "**kwargs): \"\"\" Create and return a new object. See help(type) for accurate signature.", "from numbers import Number from typing import * import nuke from . import", "undoDescribe(self,*args, **kwargs): \"\"\" Return short description of undo n. \"\"\" return None def", "Redoes 0'th redo. 
\"\"\" return None def disable(self,*args, **kwargs): \"\"\" Prevent recording undos", "return None def redoTruncate(self,*args, **kwargs): \"\"\" Destroy any redo's greater or equal to", "None def disable(self,*args, **kwargs): \"\"\" Prevent recording undos until matching enable() \"\"\" return", "None def enable(self,*args, **kwargs): \"\"\" Undoes the previous disable() \"\"\" return None def", "cancel(self,*args, **kwargs): \"\"\" Undoes any actions recorded in the current set and throws", "Number from typing import * import nuke from . import * class Undo(object):", "set. \"\"\" return None def end(self,*args, **kwargs): \"\"\" Complete current undo set and", "\"\"\" return None def __enter__(self,*args, **kwargs): \"\"\" \"\"\" return None def __exit__(self,*args, **kwargs):", "\"\"\" return None def new(self,*args, **kwargs): \"\"\" Same as end();begin(). \"\"\" return None", "redo's greater or equal to n. \"\"\" return None def undoDescribe(self,*args, **kwargs): \"\"\"", "short description of redo n. \"\"\" return None def undoDescribeFully(self,*args, **kwargs): \"\"\" Return", "user-visible group of undo actions. \"\"\" return None def name(self,*args, **kwargs): \"\"\" Name", "Return long description of undo n. \"\"\" return None def redoDescribeFully(self,*args, **kwargs): \"\"\"", "away. \"\"\" return None def undoSize(self,*args, **kwargs): \"\"\" Number of undo's that can", "Prevent recording undos until matching enable() \"\"\" return None def enable(self,*args, **kwargs): \"\"\"", "import nuke from . import * class Undo(object): \"\"\" Undo \"\"\" def __hash__(self,", "the current set and throws it away. \"\"\" return None def undoSize(self,*args, **kwargs):", "def undoSize(self,*args, **kwargs): \"\"\" Number of undo's that can be done. \"\"\" return", "__enter__(self,*args, **kwargs): \"\"\" \"\"\" return None def __exit__(self,*args, **kwargs): \"\"\" \"\"\" return None", "set and throws it away. 
\"\"\" return None def undoSize(self,*args, **kwargs): \"\"\" Number", "can be done. \"\"\" return None def undoTruncate(self,*args, **kwargs): \"\"\" Destroy any undo's", "disable() has been called \"\"\" return None def __enter__(self,*args, **kwargs): \"\"\" \"\"\" return", "None def __init__(self, *args, **kwargs): \"\"\" Initialize self. See help(type(self)) for accurate signature.", "matching enable() \"\"\" return None def enable(self,*args, **kwargs): \"\"\" Undoes the previous disable()", "of undo's that can be done. \"\"\" return None def redoSize(self,*args, **kwargs): \"\"\"", "be done. \"\"\" return None def undoTruncate(self,*args, **kwargs): \"\"\" Destroy any undo's greater", "return None def disable(self,*args, **kwargs): \"\"\" Prevent recording undos until matching enable() \"\"\"", "\"\"\" return None def redoDescribe(self,*args, **kwargs): \"\"\" Return short description of redo n.", "\"\"\" return None def __init__(self, *args, **kwargs): \"\"\" Initialize self. See help(type(self)) for", "): \"\"\" Return hash(self). \"\"\" return None def __new__(self,*args, **kwargs): \"\"\" Create and", "\"\"\" return None def undo(self,*args, **kwargs): \"\"\" Undoes 0'th undo. \"\"\" return None", "description of undo n. \"\"\" return None def redoDescribe(self,*args, **kwargs): \"\"\" Return short", "return None def redo(self,*args, **kwargs): \"\"\" Redoes 0'th redo. \"\"\" return None def", "it to the undo list. \"\"\" return None def new(self,*args, **kwargs): \"\"\" Same", "for accurate signature. \"\"\" return None def begin(self,*args, **kwargs): \"\"\" Begin a new", "n. \"\"\" return None def redoDescribe(self,*args, **kwargs): \"\"\" Return short description of redo", "of redo n. \"\"\" return None def undoDescribeFully(self,*args, **kwargs): \"\"\" Return long description", "return None def name(self,*args, **kwargs): \"\"\" Name current undo set. \"\"\" return None", "description of redo n. 
\"\"\" return None def undoDescribeFully(self,*args, **kwargs): \"\"\" Return long", "enable() \"\"\" return None def enable(self,*args, **kwargs): \"\"\" Undoes the previous disable() \"\"\"", "Undo(object): \"\"\" Undo \"\"\" def __hash__(self, ): \"\"\" Return hash(self). \"\"\" return None", "return None def __exit__(self,*args, **kwargs): \"\"\" \"\"\" return None def __init__(self, *args, **kwargs):", "import Number from typing import * import nuke from . import * class", "disable() \"\"\" return None def disabled(self,*args, **kwargs): \"\"\" True if disable() has been", "been called \"\"\" return None def __enter__(self,*args, **kwargs): \"\"\" \"\"\" return None def", "typing import * import nuke from . import * class Undo(object): \"\"\" Undo", "undo set. \"\"\" return None def end(self,*args, **kwargs): \"\"\" Complete current undo set", "return None def __enter__(self,*args, **kwargs): \"\"\" \"\"\" return None def __exit__(self,*args, **kwargs): \"\"\"", "current set and throws it away. \"\"\" return None def undoSize(self,*args, **kwargs): \"\"\"", "True if disable() has been called \"\"\" return None def __enter__(self,*args, **kwargs): \"\"\"", "0'th undo. \"\"\" return None def redo(self,*args, **kwargs): \"\"\" Redoes 0'th redo. \"\"\"", "a new object. See help(type) for accurate signature. \"\"\" return None def begin(self,*args,", "group of undo actions. \"\"\" return None def name(self,*args, **kwargs): \"\"\" Name current", "any actions recorded in the current set and throws it away. \"\"\" return", "def __enter__(self,*args, **kwargs): \"\"\" \"\"\" return None def __exit__(self,*args, **kwargs): \"\"\" \"\"\" return", "\"\"\" return None def enable(self,*args, **kwargs): \"\"\" Undoes the previous disable() \"\"\" return", "\"\"\" Destroy any redo's greater or equal to n. \"\"\" return None def", "* import nuke from . 
import * class Undo(object): \"\"\" Undo \"\"\" def", "import * class Undo(object): \"\"\" Undo \"\"\" def __hash__(self, ): \"\"\" Return hash(self).", "\"\"\" return None def undoTruncate(self,*args, **kwargs): \"\"\" Destroy any undo's greater or equal", "\"\"\" Return short description of undo n. \"\"\" return None def redoDescribe(self,*args, **kwargs):", "return None def disabled(self,*args, **kwargs): \"\"\" True if disable() has been called \"\"\"", "None def __exit__(self,*args, **kwargs): \"\"\" \"\"\" return None def __init__(self, *args, **kwargs): \"\"\"", "Name current undo set. \"\"\" return None def end(self,*args, **kwargs): \"\"\" Complete current", "None def redoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of redo n. \"\"\" return", "undo's that can be done. \"\"\" return None def redoSize(self,*args, **kwargs): \"\"\" Number", "new object. See help(type) for accurate signature. \"\"\" return None def begin(self,*args, **kwargs):", "None def begin(self,*args, **kwargs): \"\"\" Begin a new user-visible group of undo actions.", "**kwargs): \"\"\" Prevent recording undos until matching enable() \"\"\" return None def enable(self,*args,", "Begin a new user-visible group of undo actions. \"\"\" return None def name(self,*args,", "**kwargs): \"\"\" True if disable() has been called \"\"\" return None def __enter__(self,*args,", "def redoTruncate(self,*args, **kwargs): \"\"\" Destroy any redo's greater or equal to n. \"\"\"", "in the current set and throws it away. \"\"\" return None def undoSize(self,*args,", "undoTruncate(self,*args, **kwargs): \"\"\" Destroy any undo's greater or equal to n. \"\"\" return", "of undo n. \"\"\" return None def redoDescribe(self,*args, **kwargs): \"\"\" Return short description", "return None def undoSize(self,*args, **kwargs): \"\"\" Number of undo's that can be done.", "description of redo n. \"\"\" return None def undo(self,*args, **kwargs): \"\"\" Undoes 0'th", "Undoes 0'th undo. 
\"\"\" return None def redo(self,*args, **kwargs): \"\"\" Redoes 0'th redo.", "**kwargs): \"\"\" Return long description of redo n. \"\"\" return None def undo(self,*args,", "def name(self,*args, **kwargs): \"\"\" Name current undo set. \"\"\" return None def end(self,*args,", "be done. \"\"\" return None def redoSize(self,*args, **kwargs): \"\"\" Number of redo's that", "\"\"\" Number of redo's that can be done. \"\"\" return None def undoTruncate(self,*args,", "of redo n. \"\"\" return None def undo(self,*args, **kwargs): \"\"\" Undoes 0'th undo.", "from . import * class Undo(object): \"\"\" Undo \"\"\" def __hash__(self, ): \"\"\"", "n. \"\"\" return None def undo(self,*args, **kwargs): \"\"\" Undoes 0'th undo. \"\"\" return", "long description of undo n. \"\"\" return None def redoDescribeFully(self,*args, **kwargs): \"\"\" Return", "help(type) for accurate signature. \"\"\" return None def begin(self,*args, **kwargs): \"\"\" Begin a", "class Undo(object): \"\"\" Undo \"\"\" def __hash__(self, ): \"\"\" Return hash(self). \"\"\" return", "redo n. \"\"\" return None def undoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of", "object. See help(type) for accurate signature. \"\"\" return None def begin(self,*args, **kwargs): \"\"\"", "the previous disable() \"\"\" return None def disabled(self,*args, **kwargs): \"\"\" True if disable()", "nuke from . import * class Undo(object): \"\"\" Undo \"\"\" def __hash__(self, ):", "None def undo(self,*args, **kwargs): \"\"\" Undoes 0'th undo. \"\"\" return None def redo(self,*args,", "**kwargs): \"\"\" \"\"\" return None def __init__(self, *args, **kwargs): \"\"\" Initialize self. See", "return None def undoTruncate(self,*args, **kwargs): \"\"\" Destroy any undo's greater or equal to", "\"\"\" return None def redo(self,*args, **kwargs): \"\"\" Redoes 0'th redo. \"\"\" return None", "redo. 
\"\"\" return None def disable(self,*args, **kwargs): \"\"\" Prevent recording undos until matching", "<gh_stars>1-10 from numbers import Number from typing import * import nuke from .", "import * import nuke from . import * class Undo(object): \"\"\" Undo \"\"\"", "description of undo n. \"\"\" return None def redoDescribeFully(self,*args, **kwargs): \"\"\" Return long", "**kwargs): \"\"\" Name current undo set. \"\"\" return None def end(self,*args, **kwargs): \"\"\"", "undo list. \"\"\" return None def new(self,*args, **kwargs): \"\"\" Same as end();begin(). \"\"\"", "accurate signature. \"\"\" return None def begin(self,*args, **kwargs): \"\"\" Begin a new user-visible", "redoSize(self,*args, **kwargs): \"\"\" Number of redo's that can be done. \"\"\" return None", "return a new object. See help(type) for accurate signature. \"\"\" return None def", "return None def cancel(self,*args, **kwargs): \"\"\" Undoes any actions recorded in the current", "greater or equal to n. \"\"\" return None def undoDescribe(self,*args, **kwargs): \"\"\" Return", "def new(self,*args, **kwargs): \"\"\" Same as end();begin(). \"\"\" return None def cancel(self,*args, **kwargs):", "def redoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of redo n. \"\"\" return None", "until matching enable() \"\"\" return None def enable(self,*args, **kwargs): \"\"\" Undoes the previous", "it away. \"\"\" return None def undoSize(self,*args, **kwargs): \"\"\" Number of undo's that", "def enable(self,*args, **kwargs): \"\"\" Undoes the previous disable() \"\"\" return None def disabled(self,*args,", "**kwargs): \"\"\" Begin a new user-visible group of undo actions. \"\"\" return None", "undo n. \"\"\" return None def redoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of", "hash(self). 
\"\"\" return None def __new__(self,*args, **kwargs): \"\"\" Create and return a new", "undos until matching enable() \"\"\" return None def enable(self,*args, **kwargs): \"\"\" Undoes the", "undoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of undo n. \"\"\" return None def", "undo's greater or equal to n. \"\"\" return None def redoTruncate(self,*args, **kwargs): \"\"\"", "*args, **kwargs): \"\"\" Initialize self. See help(type(self)) for accurate signature. \"\"\" return None", "name(self,*args, **kwargs): \"\"\" Name current undo set. \"\"\" return None def end(self,*args, **kwargs):", "None def undoTruncate(self,*args, **kwargs): \"\"\" Destroy any undo's greater or equal to n.", "**kwargs): \"\"\" Redoes 0'th redo. \"\"\" return None def disable(self,*args, **kwargs): \"\"\" Prevent", "\"\"\" Return long description of redo n. \"\"\" return None def undo(self,*args, **kwargs):", "None def cancel(self,*args, **kwargs): \"\"\" Undoes any actions recorded in the current set", "list. \"\"\" return None def new(self,*args, **kwargs): \"\"\" Same as end();begin(). \"\"\" return", "\"\"\" return None def undoDescribe(self,*args, **kwargs): \"\"\" Return short description of undo n.", "**kwargs): \"\"\" Return short description of redo n. \"\"\" return None def undoDescribeFully(self,*args,", "return None def undoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of undo n. \"\"\"", "**kwargs): \"\"\" Same as end();begin(). \"\"\" return None def cancel(self,*args, **kwargs): \"\"\" Undoes", "\"\"\" Complete current undo set and add it to the undo list. \"\"\"", "def disable(self,*args, **kwargs): \"\"\" Prevent recording undos until matching enable() \"\"\" return None", "def undoDescribe(self,*args, **kwargs): \"\"\" Return short description of undo n. \"\"\" return None", "equal to n. 
\"\"\" return None def redoTruncate(self,*args, **kwargs): \"\"\" Destroy any redo's", "\"\"\" return None def undoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of undo n.", "* class Undo(object): \"\"\" Undo \"\"\" def __hash__(self, ): \"\"\" Return hash(self). \"\"\"", "0'th redo. \"\"\" return None def disable(self,*args, **kwargs): \"\"\" Prevent recording undos until", "n. \"\"\" return None def undoDescribe(self,*args, **kwargs): \"\"\" Return short description of undo", "n. \"\"\" return None def redoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of redo", "None def redoDescribe(self,*args, **kwargs): \"\"\" Return short description of redo n. \"\"\" return", "return None def enable(self,*args, **kwargs): \"\"\" Undoes the previous disable() \"\"\" return None", "Return short description of undo n. \"\"\" return None def redoDescribe(self,*args, **kwargs): \"\"\"", "\"\"\" Create and return a new object. See help(type) for accurate signature. \"\"\"", "\"\"\" return None def redoTruncate(self,*args, **kwargs): \"\"\" Destroy any redo's greater or equal", "Destroy any undo's greater or equal to n. \"\"\" return None def redoTruncate(self,*args,", "__new__(self,*args, **kwargs): \"\"\" Create and return a new object. See help(type) for accurate", "actions. \"\"\" return None def name(self,*args, **kwargs): \"\"\" Name current undo set. \"\"\"", "Complete current undo set and add it to the undo list. \"\"\" return", "previous disable() \"\"\" return None def disabled(self,*args, **kwargs): \"\"\" True if disable() has", "return None def __new__(self,*args, **kwargs): \"\"\" Create and return a new object. See", "\"\"\" return None def redoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of redo n.", "\"\"\" Return long description of undo n. \"\"\" return None def redoDescribeFully(self,*args, **kwargs):", "greater or equal to n. 
\"\"\" return None def redoTruncate(self,*args, **kwargs): \"\"\" Destroy", "of undo n. \"\"\" return None def redoDescribeFully(self,*args, **kwargs): \"\"\" Return long description", "None def redo(self,*args, **kwargs): \"\"\" Redoes 0'th redo. \"\"\" return None def disable(self,*args,", "\"\"\" return None def disabled(self,*args, **kwargs): \"\"\" True if disable() has been called", "def __init__(self, *args, **kwargs): \"\"\" Initialize self. See help(type(self)) for accurate signature. \"\"\"", "new(self,*args, **kwargs): \"\"\" Same as end();begin(). \"\"\" return None def cancel(self,*args, **kwargs): \"\"\"", "current undo set. \"\"\" return None def end(self,*args, **kwargs): \"\"\" Complete current undo", "\"\"\" Name current undo set. \"\"\" return None def end(self,*args, **kwargs): \"\"\" Complete", "of undo actions. \"\"\" return None def name(self,*args, **kwargs): \"\"\" Name current undo", "Number of undo's that can be done. \"\"\" return None def redoSize(self,*args, **kwargs):", "None def redoTruncate(self,*args, **kwargs): \"\"\" Destroy any redo's greater or equal to n.", "def undo(self,*args, **kwargs): \"\"\" Undoes 0'th undo. \"\"\" return None def redo(self,*args, **kwargs):", "\"\"\" Return hash(self). \"\"\" return None def __new__(self,*args, **kwargs): \"\"\" Create and return", "redo(self,*args, **kwargs): \"\"\" Redoes 0'th redo. \"\"\" return None def disable(self,*args, **kwargs): \"\"\"", "**kwargs): \"\"\" Number of redo's that can be done. \"\"\" return None def", "None def undoSize(self,*args, **kwargs): \"\"\" Number of undo's that can be done. \"\"\"", "return None def redoDescribe(self,*args, **kwargs): \"\"\" Return short description of redo n. \"\"\"", "Create and return a new object. See help(type) for accurate signature. \"\"\" return", "new user-visible group of undo actions. 
\"\"\" return None def name(self,*args, **kwargs): \"\"\"", "None def __enter__(self,*args, **kwargs): \"\"\" \"\"\" return None def __exit__(self,*args, **kwargs): \"\"\" \"\"\"", "def begin(self,*args, **kwargs): \"\"\" Begin a new user-visible group of undo actions. \"\"\"", "\"\"\" Begin a new user-visible group of undo actions. \"\"\" return None def", "that can be done. \"\"\" return None def undoTruncate(self,*args, **kwargs): \"\"\" Destroy any", "recorded in the current set and throws it away. \"\"\" return None def", "\"\"\" Destroy any undo's greater or equal to n. \"\"\" return None def", "signature. \"\"\" return None def begin(self,*args, **kwargs): \"\"\" Begin a new user-visible group", "return None def new(self,*args, **kwargs): \"\"\" Same as end();begin(). \"\"\" return None def", "**kwargs): \"\"\" Undoes 0'th undo. \"\"\" return None def redo(self,*args, **kwargs): \"\"\" Redoes", "def undoTruncate(self,*args, **kwargs): \"\"\" Destroy any undo's greater or equal to n. \"\"\"", "\"\"\" Undoes the previous disable() \"\"\" return None def disabled(self,*args, **kwargs): \"\"\" True", "def redoDescribe(self,*args, **kwargs): \"\"\" Return short description of redo n. \"\"\" return None", "\"\"\" return None def end(self,*args, **kwargs): \"\"\" Complete current undo set and add", "undo actions. \"\"\" return None def name(self,*args, **kwargs): \"\"\" Name current undo set.", "and add it to the undo list. \"\"\" return None def new(self,*args, **kwargs):", "def __hash__(self, ): \"\"\" Return hash(self). \"\"\" return None def __new__(self,*args, **kwargs): \"\"\"", "the undo list. \"\"\" return None def new(self,*args, **kwargs): \"\"\" Same as end();begin().", "**kwargs): \"\"\" Complete current undo set and add it to the undo list.", "\"\"\" return None def undoSize(self,*args, **kwargs): \"\"\" Number of undo's that can be", "any redo's greater or equal to n. 
\"\"\" return None def undoDescribe(self,*args, **kwargs):", "See help(type) for accurate signature. \"\"\" return None def begin(self,*args, **kwargs): \"\"\" Begin", "redo n. \"\"\" return None def undo(self,*args, **kwargs): \"\"\" Undoes 0'th undo. \"\"\"", "None def end(self,*args, **kwargs): \"\"\" Complete current undo set and add it to", "and return a new object. See help(type) for accurate signature. \"\"\" return None", "return None def end(self,*args, **kwargs): \"\"\" Complete current undo set and add it", "from typing import * import nuke from . import * class Undo(object): \"\"\"", "add it to the undo list. \"\"\" return None def new(self,*args, **kwargs): \"\"\"", "\"\"\" return None def __new__(self,*args, **kwargs): \"\"\" Create and return a new object.", "called \"\"\" return None def __enter__(self,*args, **kwargs): \"\"\" \"\"\" return None def __exit__(self,*args,", "numbers import Number from typing import * import nuke from . import *", "return None def redoSize(self,*args, **kwargs): \"\"\" Number of redo's that can be done.", "to n. \"\"\" return None def undoDescribe(self,*args, **kwargs): \"\"\" Return short description of", "\"\"\" Undo \"\"\" def __hash__(self, ): \"\"\" Return hash(self). \"\"\" return None def", "Return hash(self). \"\"\" return None def __new__(self,*args, **kwargs): \"\"\" Create and return a", "end(self,*args, **kwargs): \"\"\" Complete current undo set and add it to the undo", "undoSize(self,*args, **kwargs): \"\"\" Number of undo's that can be done. \"\"\" return None", "to n. \"\"\" return None def redoTruncate(self,*args, **kwargs): \"\"\" Destroy any redo's greater", "\"\"\" Prevent recording undos until matching enable() \"\"\" return None def enable(self,*args, **kwargs):", "undo. \"\"\" return None def redo(self,*args, **kwargs): \"\"\" Redoes 0'th redo. 
\"\"\" return", "has been called \"\"\" return None def __enter__(self,*args, **kwargs): \"\"\" \"\"\" return None", "**kwargs): \"\"\" Undoes the previous disable() \"\"\" return None def disabled(self,*args, **kwargs): \"\"\"", "\"\"\" Undoes 0'th undo. \"\"\" return None def redo(self,*args, **kwargs): \"\"\" Redoes 0'th", "return None def undo(self,*args, **kwargs): \"\"\" Undoes 0'th undo. \"\"\" return None def", "return None def undoDescribe(self,*args, **kwargs): \"\"\" Return short description of undo n. \"\"\"", "or equal to n. \"\"\" return None def redoTruncate(self,*args, **kwargs): \"\"\" Destroy any", "\"\"\" return None def redoSize(self,*args, **kwargs): \"\"\" Number of redo's that can be", "def disabled(self,*args, **kwargs): \"\"\" True if disable() has been called \"\"\" return None", "that can be done. \"\"\" return None def redoSize(self,*args, **kwargs): \"\"\" Number of", "any undo's greater or equal to n. \"\"\" return None def redoTruncate(self,*args, **kwargs):", "begin(self,*args, **kwargs): \"\"\" Begin a new user-visible group of undo actions. \"\"\" return", "undo(self,*args, **kwargs): \"\"\" Undoes 0'th undo. \"\"\" return None def redo(self,*args, **kwargs): \"\"\"", "def __exit__(self,*args, **kwargs): \"\"\" \"\"\" return None def __init__(self, *args, **kwargs): \"\"\" Initialize", "or equal to n. \"\"\" return None def undoDescribe(self,*args, **kwargs): \"\"\" Return short", "recording undos until matching enable() \"\"\" return None def enable(self,*args, **kwargs): \"\"\" Undoes", "Undoes any actions recorded in the current set and throws it away. \"\"\"", "__init__(self, *args, **kwargs): \"\"\" Initialize self. See help(type(self)) for accurate signature. \"\"\" return", "n. \"\"\" return None def undoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of undo", "None def undoDescribe(self,*args, **kwargs): \"\"\" Return short description of undo n. 
\"\"\" return", "disabled(self,*args, **kwargs): \"\"\" True if disable() has been called \"\"\" return None def", "redoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of redo n. \"\"\" return None def", "**kwargs): \"\"\" \"\"\" return None def __exit__(self,*args, **kwargs): \"\"\" \"\"\" return None def", "set and add it to the undo list. \"\"\" return None def new(self,*args,", "as end();begin(). \"\"\" return None def cancel(self,*args, **kwargs): \"\"\" Undoes any actions recorded", "long description of redo n. \"\"\" return None def undo(self,*args, **kwargs): \"\"\" Undoes", "Undoes the previous disable() \"\"\" return None def disabled(self,*args, **kwargs): \"\"\" True if", "\"\"\" \"\"\" return None def __init__(self, *args, **kwargs): \"\"\" Initialize self. See help(type(self))", "__hash__(self, ): \"\"\" Return hash(self). \"\"\" return None def __new__(self,*args, **kwargs): \"\"\" Create", "short description of undo n. \"\"\" return None def redoDescribe(self,*args, **kwargs): \"\"\" Return", "\"\"\" Redoes 0'th redo. \"\"\" return None def disable(self,*args, **kwargs): \"\"\" Prevent recording", "**kwargs): \"\"\" Return long description of undo n. \"\"\" return None def redoDescribeFully(self,*args,", "return None def redoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of redo n. \"\"\"", "n. \"\"\" return None def redoTruncate(self,*args, **kwargs): \"\"\" Destroy any redo's greater or", "def undoDescribeFully(self,*args, **kwargs): \"\"\" Return long description of undo n. \"\"\" return None", "\"\"\" return None def disable(self,*args, **kwargs): \"\"\" Prevent recording undos until matching enable()", "actions recorded in the current set and throws it away. \"\"\" return None", "None def redoSize(self,*args, **kwargs): \"\"\" Number of redo's that can be done. \"\"\"", "can be done. 
\"\"\" return None def redoSize(self,*args, **kwargs): \"\"\" Number of redo's", "current undo set and add it to the undo list. \"\"\" return None", "__exit__(self,*args, **kwargs): \"\"\" \"\"\" return None def __init__(self, *args, **kwargs): \"\"\" Initialize self." ]
[ "import load_dotenv from O365 import Account load_dotenv() credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials) account", "print(credentials) account = Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if not isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!')", "date_query = calendar \\ .new_query('start') \\ .greater_equal(datetime(now.year, now.month, now.day, 0, 0, 0)) \\", "\\ .greater_equal(datetime(now.year, now.month, now.day, 0, 0, 0)) \\ .chain('and') \\ .on_attribute('end') \\ .less_equal(datetime(now.year,", "import isfile from dotenv import load_dotenv from O365 import Account load_dotenv() credentials =", "= schedule.get_default_calendar() now = datetime.now() date_query = calendar \\ .new_query('start') \\ .greater_equal(datetime(now.year, now.month,", "tenant_id=getenv('TENANT_ID')) if not isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!') else: account.connection.refresh_token() schedule = account.schedule()", "if not isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!') else: account.connection.refresh_token() schedule = account.schedule() calendar", "now.month, now.day, 0, 0, 0)) \\ .chain('and') \\ .on_attribute('end') \\ .less_equal(datetime(now.year, now.month, now.day,", "import datetime from os import getenv from os.path import isfile from dotenv import", "now.day, 23, 59, 59)) events = list(calendar.get_events(query=date_query, include_recurring=True)) with open(\"events.txt\", \"w\") as f:", "calendar = schedule.get_default_calendar() now = datetime.now() date_query = calendar \\ .new_query('start') \\ .greater_equal(datetime(now.year,", ".chain('and') \\ .on_attribute('end') \\ 
.less_equal(datetime(now.year, now.month, now.day, 23, 59, 59)) events = list(calendar.get_events(query=date_query,", "getenv from os.path import isfile from dotenv import load_dotenv from O365 import Account", "0, 0, 0)) \\ .chain('and') \\ .on_attribute('end') \\ .less_equal(datetime(now.year, now.month, now.day, 23, 59,", "(getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials) account = Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if not isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default',", "if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!') else: account.connection.refresh_token() schedule = account.schedule() calendar = schedule.get_default_calendar() now", "= datetime.now() date_query = calendar \\ .new_query('start') \\ .greater_equal(datetime(now.year, now.month, now.day, 0, 0,", "datetime import datetime from os import getenv from os.path import isfile from dotenv", "auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if not isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!') else: account.connection.refresh_token() schedule =", "not isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!') else: account.connection.refresh_token() schedule = account.schedule() calendar =", "O365 import Account load_dotenv() credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials) account = Account(credentials, auth_flow_type='authorization',", "from O365 import Account load_dotenv() credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials) account = Account(credentials,", "calendar \\ .new_query('start') \\ .greater_equal(datetime(now.year, now.month, now.day, 0, 
0, 0)) \\ .chain('and') \\", "= Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if not isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!') else: account.connection.refresh_token()", "schedule.get_default_calendar() now = datetime.now() date_query = calendar \\ .new_query('start') \\ .greater_equal(datetime(now.year, now.month, now.day,", "datetime.now() date_query = calendar \\ .new_query('start') \\ .greater_equal(datetime(now.year, now.month, now.day, 0, 0, 0))", "isfile from dotenv import load_dotenv from O365 import Account load_dotenv() credentials = (getenv('APPLICATION_ID'),", "load_dotenv() credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials) account = Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if not", "isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!') else: account.connection.refresh_token() schedule = account.schedule() calendar = schedule.get_default_calendar()", "0, 0)) \\ .chain('and') \\ .on_attribute('end') \\ .less_equal(datetime(now.year, now.month, now.day, 23, 59, 59))", "= (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials) account = Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if not isfile(\"./o365_token.txt\"): if", "\\ .chain('and') \\ .on_attribute('end') \\ .less_equal(datetime(now.year, now.month, now.day, 23, 59, 59)) events =", "load_dotenv from O365 import Account load_dotenv() credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials) account =", "from datetime import datetime from os import getenv from os.path import isfile from", "import Account load_dotenv() credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials) account = 
Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID'))", "os.path import isfile from dotenv import load_dotenv from O365 import Account load_dotenv() credentials", "= calendar \\ .new_query('start') \\ .greater_equal(datetime(now.year, now.month, now.day, 0, 0, 0)) \\ .chain('and')", ".new_query('start') \\ .greater_equal(datetime(now.year, now.month, now.day, 0, 0, 0)) \\ .chain('and') \\ .on_attribute('end') \\", ".on_attribute('end') \\ .less_equal(datetime(now.year, now.month, now.day, 23, 59, 59)) events = list(calendar.get_events(query=date_query, include_recurring=True)) with", "import getenv from os.path import isfile from dotenv import load_dotenv from O365 import", "Account load_dotenv() credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials) account = Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if", "credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials) account = Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if not isfile(\"./o365_token.txt\"):", "account = Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if not isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!') else:", "Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if not isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!') else: account.connection.refresh_token() schedule", "'offline_access']): print('Authenticated!') else: account.connection.refresh_token() schedule = account.schedule() calendar = schedule.get_default_calendar() now = datetime.now()", "23, 59, 59)) events = list(calendar.get_events(query=date_query, include_recurring=True)) with open(\"events.txt\", \"w\") as f: 
f.write(\"\\n\".join([event.start.isoformat()", "= list(calendar.get_events(query=date_query, include_recurring=True)) with open(\"events.txt\", \"w\") as f: f.write(\"\\n\".join([event.start.isoformat() for event in events]))", "os import getenv from os.path import isfile from dotenv import load_dotenv from O365", "datetime from os import getenv from os.path import isfile from dotenv import load_dotenv", "<filename>get_events.py from datetime import datetime from os import getenv from os.path import isfile", "account.connection.refresh_token() schedule = account.schedule() calendar = schedule.get_default_calendar() now = datetime.now() date_query = calendar", "schedule = account.schedule() calendar = schedule.get_default_calendar() now = datetime.now() date_query = calendar \\", ".greater_equal(datetime(now.year, now.month, now.day, 0, 0, 0)) \\ .chain('and') \\ .on_attribute('end') \\ .less_equal(datetime(now.year, now.month,", "\\ .less_equal(datetime(now.year, now.month, now.day, 23, 59, 59)) events = list(calendar.get_events(query=date_query, include_recurring=True)) with open(\"events.txt\",", ".less_equal(datetime(now.year, now.month, now.day, 23, 59, 59)) events = list(calendar.get_events(query=date_query, include_recurring=True)) with open(\"events.txt\", \"w\")", "59, 59)) events = list(calendar.get_events(query=date_query, include_recurring=True)) with open(\"events.txt\", \"w\") as f: f.write(\"\\n\".join([event.start.isoformat() for", "now.month, now.day, 23, 59, 59)) events = list(calendar.get_events(query=date_query, include_recurring=True)) with open(\"events.txt\", \"w\") as", "now.day, 0, 0, 0)) \\ .chain('and') \\ .on_attribute('end') \\ .less_equal(datetime(now.year, now.month, now.day, 23,", "dotenv import load_dotenv from O365 import Account load_dotenv() credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET')) print(credentials)", "= account.schedule() calendar = schedule.get_default_calendar() now = datetime.now() date_query = 
calendar \\ .new_query('start')", "else: account.connection.refresh_token() schedule = account.schedule() calendar = schedule.get_default_calendar() now = datetime.now() date_query =", "from os.path import isfile from dotenv import load_dotenv from O365 import Account load_dotenv()", "from dotenv import load_dotenv from O365 import Account load_dotenv() credentials = (getenv('APPLICATION_ID'), getenv('CLIENT_SECRET'))", "59)) events = list(calendar.get_events(query=date_query, include_recurring=True)) with open(\"events.txt\", \"w\") as f: f.write(\"\\n\".join([event.start.isoformat() for event", "0)) \\ .chain('and') \\ .on_attribute('end') \\ .less_equal(datetime(now.year, now.month, now.day, 23, 59, 59)) events", "account.schedule() calendar = schedule.get_default_calendar() now = datetime.now() date_query = calendar \\ .new_query('start') \\", "\\ .on_attribute('end') \\ .less_equal(datetime(now.year, now.month, now.day, 23, 59, 59)) events = list(calendar.get_events(query=date_query, include_recurring=True))", "\\ .new_query('start') \\ .greater_equal(datetime(now.year, now.month, now.day, 0, 0, 0)) \\ .chain('and') \\ .on_attribute('end')", "from os import getenv from os.path import isfile from dotenv import load_dotenv from", "events = list(calendar.get_events(query=date_query, include_recurring=True)) with open(\"events.txt\", \"w\") as f: f.write(\"\\n\".join([event.start.isoformat() for event in", "getenv('CLIENT_SECRET')) print(credentials) account = Account(credentials, auth_flow_type='authorization', tenant_id=getenv('TENANT_ID')) if not isfile(\"./o365_token.txt\"): if account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']):", "print('Authenticated!') else: account.connection.refresh_token() schedule = account.schedule() calendar = schedule.get_default_calendar() now = datetime.now() date_query", "account.authenticate(scopes=['https://graph.microsoft.com/.default', 'offline_access']): print('Authenticated!') else: 
account.connection.refresh_token() schedule = account.schedule() calendar = schedule.get_default_calendar() now =", "now = datetime.now() date_query = calendar \\ .new_query('start') \\ .greater_equal(datetime(now.year, now.month, now.day, 0," ]
[ "to integer # integer_number will contain 15 integer_number = int(string_number) print(integer_number) # Output:", "integer # integer_number will contain 15 integer_number = int(string_number) print(integer_number) # Output: 15", "# converting to integer # integer_number will contain 15 integer_number = int(string_number) print(integer_number)", "string_number = '15' # converting to integer # integer_number will contain 15 integer_number", "converting to integer # integer_number will contain 15 integer_number = int(string_number) print(integer_number) #", "= '15' # converting to integer # integer_number will contain 15 integer_number =", "'15' # converting to integer # integer_number will contain 15 integer_number = int(string_number)" ]
[ "def get_next_person(user): person = get_random_person() while person in user['people_seen']: person = get_random_person() return", "get_next_person(user): person = get_random_person() while person in user['people_seen']: person = get_random_person() return person" ]
[ "relation = relations[diff] logging.info(f\"Relation found: {relation.name}\") else: relation = None logging.info(\"no relation found\")", "logging.info(f\"first_bond2: {first_bond}\") if first_bond and not first_bond.direction_category: direction = None assert first_bond assert", "assert len(changed_objects) < 2 # if there are no changed objects, propose a", "destination.right_bond.right_object search = True assert destination != source logging.info(f\"proposing group from {source} to", "enough - post builder & activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0 if group.direction_category: group.direction_category.buffer", "assert category if category == slipnet.identity: category = slipnet.sameness coderack.propose_bond( source, destination, category,", "100.0 coderack.new_codelet(\"group-builder\", codelet, strength) def group_builder(codelet): # update strength value of the group", "category = group.group_category.get_related_node(slipnet.bond_category) facet = group.facet new_bond = Bond( source, destination, category, facet,", "from .workspace_object import WorkspaceObject # some methods common to the codelets def __show_which_string_object_is_from(structure):", "bond_facet, source_descriptor, destination_descriptor, codelet, ) def bond_strength_tester(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() strength", "to which it belongs leftmost = None for objekt in string.objects: if objekt.leftmost:", "structure2.total_strength * weight2 ) rhs = (weighted_strength1 + weighted_strength2) * random.random() logging.info(f\"{weighted_strength1} >", "> initials: string = workspace.target logging.info(f\"target string selected: {workspace.target} for {type_name}\") else: logging.info(f\"initial", "def __get_cut_off(density): if density > 0.8: distribution = [5.0, 150.0, 5.0, 2.0, 1.0,", "string selected: {workspace.target} for {type_name}\") else: logging.info(f\"initial 
string selected: {workspace.initial} for {type_name}\") source", "# choose the relation (change the letmost object to \"successor\" or \"d\" object_list", "correspondence, correspondence_spans, incompatible, incompatible_spans ) incompatible_bond = None incompatible_group = None # if", "= True while search: search = False if not destination.right_bond: continue if destination.right_bond.category", "are no changed objects, propose a rule with no changes if not changed_objects:", "source_descriptor, destination_descriptor def __all_opposite_mappings(mappings): return len([m for m in mappings if m.label !=", "for structure in break_objects: structure.break_the_structure() def bottom_up_description_scout(codelet): chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object", "if random.random() > 0.5: string = workspace.target logging.info(f\"target string selected: {workspace.target}\") else: logging.info(f\"initial", "= coderack.codelets_run + 100 temperature.clamped = True formulas.Temperature = 100.0 def bottom_up_correspondence_scout(codelet): object_from_initial", "node = node.apply_slippages(slippages) if target_object.described(node): if target_object.distinguishing_descriptor(node): new_list += [node] object_list = new_list", "source, destination ) forward_bond = source_descriptor.get_bond_category(destination_descriptor) if forward_bond == slipnet.identity: forward_bond = slipnet.sameness", "!= category: continue if source.left_bond.direction_category != direction: if source.left_bond.direction_category: continue if not bond_facet", "5.0, 2.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.2: distribution = [1.0,", "1.0 ) # if there is an incompatible rule, fight against it incompatible_rule", "object2 destination = object1 category = group.group_category.get_related_node(slipnet.bond_category) facet = group.facet new_bond = Bond(", "= 100.0 mapping.target_descriptor.buffer = 100.0 
coderack.new_codelet(\"correspondence-builder\", codelet, strength, correspondence) def correspondence_builder(codelet): correspondence =", "= (weighted_strength1 + weighted_strength2) * random.random() logging.info(f\"{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}\") return", "bond_density > 1.0: bond_density = 1.0 cutoff = __get_cut_off(bond_density) * 10.0 assert cutoff", "1.0] elif density > 0.6: distribution = [2.0, 5.0, 150.0, 5.0, 2.0, 1.0,", "workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength() strength = correspondence.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random()", "__fight_incompatibles(incompatible_bonds, bond, \"bonds\", 1.0, 1.0) incompatible_groups = bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups, bond, \"groups\", 1.0,", "direction = slipnet.right elif source.rightmost: direction = slipnet.left else: activations = [slipnet.left.activation] activations", "incompatible.break_the_structure() # create new bonds group.bond_list = [] for i in range(1, len(group.object_list)):", "= [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): mydirection = slipnet.left else: mydirection", "= source.left_bond.left_object search = True destination = source search = True while search:", "== 0 def __structure_versus_structure(structure1, weight1, structure2, weight2): structure1.update_strength() structure2.update_strength() weighted_strength1 = formulas.temperature_adjusted_value( structure1.total_strength", "pylint: disable=too-many-statements def top_down_group_scout__category(codelet): group_category = codelet.arguments[0] category = group_category.get_related_node(slipnet.bond_category) assert category source", "object in group with these bonds search = True destination = source while", "ones to the existing corr. 
existing = correspondence.object_from_initial.correspondence for mapping in correspondence.concept_mappings: if", "= group.single_letter_group_probability() assert random.random() >= probability coderack.propose_single_letter_group(source, codelet) return direction = first_bond.direction_category search", "direction, formulas.local_direction_category_relevance, \"direction\" ) logging.info(f\"source chosen = {source}\") assert not source.spans_string() if source.leftmost:", "1.0, 1.0) incompatible_groups = bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups, bond, \"groups\", 1.0, 1.0) # fight", "object_list = new_list # should this be += ?? assert object_list # use", "def rule_strength_tester(codelet): rule = codelet.arguments[0] rule.update_strength() probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0) assert random.random()", "letter_of_initial_string.left_index more_letters = [ o for o in workspace.modified.objects if isinstance(o, Letter) and", "if abs(diff) < 2: relations = {0: slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor} relation", "# fight against other rules if workspace.rule: assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0) workspace.build_rule(rule)", "1.0] stop = sum(distribution) * random.random() total = 0.0 for i in range(0,", "object to \"successor\" or \"d\" object_list = [] if changed.replacement.relation: object_list += [changed.replacement.relation]", "def rule_translator(): assert workspace.rule if len(workspace.initial) == 1 and len(workspace.target) == 1: bond_density", "= formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) assert concept_mappings assert __slippability(concept_mappings) # find", "= slipnet.right elif source.rightmost: direction = slipnet.left else: activations = [slipnet.left.activation] activations +=", "= [ 
o for o in workspace.initial.objects if not o != changed and", "= [m.initial_description_type for m in opposites] flip_target_object = False if ( object_from_initial.spans_string() and", "codelet ) def description_strength_tester(codelet): description = codelet.arguments[0] description.descriptor.buffer = 100.0 description.update_strength() strength =", ".workspace_object import WorkspaceObject # some methods common to the codelets def __show_which_string_object_is_from(structure): if", ") destination = choose_neighbour(source) logging.info(f\"source: {source}, destination: {destination}\") assert destination bond_facet = __get_bond_facet(source,", "= source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category = slipnet.sameness logging.info(f\"proposing {category.name}", "string_bond in bond.string.bonds: if bond.same_neighbours(string_bond) and bond.same_categories(string_bond): if bond.direction_category: bond.direction_category.buffer = 100.0 bond.category.buffer", "# if incompatible bonds exist - fight group.update_strength() assert __fight_incompatibles(incompatible_bonds, group, \"bonds\", 1.0,", "{workspace.initial}\") # find leftmost object & the highest group to which it belongs", "5.0, 2.0, 1.0, 1.0] stop = sum(distribution) * random.random() total = 0.0 for", "in the workspace # object_list = the union of this and the distingushing", "len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds, bond, \"bonds\", 1.0, 1.0) incompatible_groups = bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups,", "= correspondence.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability # activate", "source assert not source.spans_string() if source.leftmost: direction = slipnet.right elif source.rightmost: direction =", "return False # start the 
actual codelets def breaker(): probability_of_fizzle = (100.0 -", "!= source objects = [source] bonds = [] while source != destination: bonds", "flip_target_object = True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def important_object_correspondence_scout(codelet): object_from_initial", "object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) assert concept_mappings assert __slippability(concept_mappings) # find out if any are", "logging.info(f\"source: {source}, destination: {destination}\") assert destination bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor =", "incompatible_bond, 2.0 ) # won against incompatible bond incompatible_group = target.group if incompatible_group:", "o.left_index == position ] letter_of_modified_string = more_letters and more_letters[0] or None assert letter_of_modified_string", "activations += [slipnet.right.activation] if not formulas.select_list_position(activations): mydirection = slipnet.left else: mydirection = slipnet.right", "leftmost.spans_string(): # the object already spans the string - propose this object group", "flip_target_object, codelet, ) def correspondence_strength_tester(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target =", "target_object.distinguishing_descriptor(node): new_list += [node] object_list = new_list # should this be += ??", "= None for objekt in string.objects: if objekt.leftmost: leftmost = objekt while leftmost.group", "not first_bond or first_bond.category != category: if category == slipnet.sameness and isinstance(source, Letter):", ") # if there is an incompatible rule, fight against it incompatible_rule =", "the correspondence exists, activate concept mappings # and add new ones to the", "choose_neighbour(source) assert destination logging.info(f\"destination: 
{destination}\") bond_facet = __get_bond_facet(source, destination) logging.info(f\"chosen bond facet: {bond_facet.get_name()}\")", "group with these bonds search = True while search: search = False if", "structure, structure_weight, incompatible, incompatible_weight ): logging.info(f\"lost fight with {incompatible}\") return False logging.info(f\"won fight", "destination.get_descriptor(bond_facet) assert source_descriptor assert destination_descriptor return source_descriptor, destination_descriptor def __all_opposite_mappings(mappings): return len([m for", "= [leftmost] while leftmost.right_bond: bonds += [leftmost.right_bond] leftmost = leftmost.right_bond.right_object objects += [leftmost]", "1.0, 1.0, 1.0, 1.0] elif density > 0.6: distribution = [2.0, 5.0, 150.0,", "found for {letter_of_initial_string}, so fizzling\" ) return position = letter_of_initial_string.left_index more_letters = [", "= initial_relevance + initial_unhappiness if randomized > initials: string = workspace.target logging.info(f\"target string", "100.0 if not mapping.is_contained_by(existing.concept_mappings): existing.concept_mappings += [mapping] return incompatibles = correspondence.get_incompatible_correspondences() # fight", "= Replacement( letter_of_initial_string, letter_of_modified_string, relation ) if relation != slipnet.sameness: letter_of_initial_string.changed = True", "= source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category = slipnet.sameness coderack.propose_bond( source,", "assert bonds group_category = category.get_related_node(slipnet.group_category) coderack.propose_group( objects, bonds, group_category, direction_category, bond_facet, codelet )", "correspondence.update_strength() strength = correspondence.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability", "try to break all objects for structure in break_objects: break_probability = 
formulas.temperature_adjusted_probability( structure.total_strength", ".workspace_formulas import workspace from .workspace_object import WorkspaceObject # some methods common to the", "bond, \"groups\", 1.0, 1.0) # fight all incompatible correspondences incompatible_correspondences = [] if", "!= category: # check the other side of object if direction == slipnet.right:", "= source.left_bond else: first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond\") else: logging.info(f\"first_bond:", "formulas.temperature_adjusted_probability( structure.total_strength / 100.0 ) if formulas.coin_flip(break_probability): return for structure in break_objects: structure.break_the_structure()", "logging.info(\"succeeded: posting bond-builder\") coderack.new_codelet(\"bond-builder\", codelet, strength) def bond_builder(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength()", "workspace.structures if isinstance(s, (Group, Bond, Correspondence)) ] assert structures structure = random.choice(structures) __show_which_string_object_is_from(structure)", "assert first_bond.direction_category == direction logging.info(f\"possible group: {first_bond}\") category = first_bond.category assert category group_category", "string = workspace.initial relevances = initial_relevance + target_relevance unhappinesses = initial_unhappiness + target_unhappiness", "slipnet.right elif source.rightmost: direction = slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation]", "description = codelet.arguments[0] assert description.object in workspace.objects if description.object.described(description.descriptor): description.description_type.buffer = 100.0 description.descriptor.buffer", "fizzling\" ) return position = letter_of_initial_string.left_index more_letters = [ o for o in", "for incompatible in incompatibles: incompatible.break_the_structure() # break incompatible group and bond if 
they", "letter_of_initial_string, letter_of_modified_string, relation ) if relation != slipnet.sameness: letter_of_initial_string.changed = True workspace.changed_object =", "__structure_versus_structure( correspondence, 1.0, incompatible_rule, 1.0 ) for incompatible in incompatibles: incompatible.break_the_structure() # break", "+= distribution[i] if total >= stop: return i + 1 return len(distribution) def", ") correspondence.update_strength() strength = correspondence.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <=", "150.0, 5.0, 2.0, 1.0, 1.0] stop = sum(distribution) * random.random() total = 0.0", "= object_from_initial.relevant_distinguishing_descriptors() slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode initial_descriptor = slipnode for mapping in", "group: {first_bond}\") category = first_bond.category assert category group_category = category.get_related_node(slipnet.group_category) logging.info(f\"trying from {source}", "slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond or first_bond.category", "initial_descriptor: target_candidates += [objekt] assert target_candidates object_from_target = choose_unmodified_object( \"inter_string_salience\", target_candidates ) assert", "fight against all correspondences if incompatibles: correspondence_spans = ( correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span() )", "equivalent = group.string.equivalent_group(group) if equivalent: logging.info(\"already exists...activate descriptors & fizzle\") group.activate_descriptions() equivalent.add_descriptions(group.descriptions) return", "= True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def correspondence_strength_tester(codelet): correspondence =", "o.changed] # assert len(changed_objects) < 2 # if 
there are no changed objects,", "bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination ) forward_bond", "Bond): if structure.source.group: if structure.source.group == structure.destination.group: break_objects += [structure.source.group] # try to", "# use conceptual depth to choose a description value_list = [] for node", "group.string.equivalent_group(group) if equivalent: logging.info(\"already exists...activate descriptors & fizzle\") group.activate_descriptions() equivalent.add_descriptions(group.descriptions) return # check", "= choose_unmodified_object( \"relative_importance\", workspace.initial.objects ) descriptors = object_from_initial.relevant_distinguishing_descriptors() slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode", "return \"target\" if structure.string == workspace.initial: return \"initial\" return \"other\" def __get_scout_source(slipnode, relevance_method,", "group.facet, codelet, ) return bonds = [] objects = [leftmost] while leftmost.right_bond: bonds", "[source] bonds = [] while source != destination: bonds += [source.right_bond] objects +=", "relevance = {target_relevance}, \" f\"unhappiness = {int(target_unhappiness)}\" ) string = workspace.initial relevances =", "incompatible_bond: # bond found - fight against it assert __structure_versus_structure( correspondence, 3.0, incompatible_bond,", "Replacement( letter_of_initial_string, letter_of_modified_string, relation ) if relation != slipnet.sameness: letter_of_initial_string.changed = True workspace.changed_object", "logging.info(f\"chosen bond facet: {bond_facet.get_name()}\") logging.info(f\"Source: {source}, destination: {destination}\") bond_descriptors = __get_descriptors(bond_facet, source, destination)", "it is the only one of its type in the string object_list =", "else: logging.info(f\"first_bond: {first_bond}\") if first_bond and not 
first_bond.direction_category: direction = None if not", "(change the letmost object to \"successor\" or \"d\" object_list = [] if changed.replacement.relation:", "coderack.propose_single_letter_group(source, codelet) return direction = first_bond.direction_category search = True bond_facet = None #", "target_not_flipped = False initial_in_objects = object_from_initial in workspace.objects target_in_objects = object_from_target in workspace.objects", "= 100.0 logging.info(\"already exists: activate descriptors & Fizzle\") return incompatible_bonds = bond.get_incompatible_bonds() logging.info(f\"number", "from list chosen_bond = random.choice(bonds) category = chosen_bond.category direction_category = chosen_bond.direction_category bond_facet =", "find leftmost object in group with these bonds search = True while search:", "2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0] else: distribution = [1.0, 1.0,", ".workspace_formulas import choose_unmodified_object from .workspace_formulas import workspace from .workspace_object import WorkspaceObject # some", "random.random() * (relevances + unhappinesses) initials = initial_relevance + initial_unhappiness if randomized >", "to the codelets def __show_which_string_object_is_from(structure): if not structure: return \"unstructured\" if isinstance(structure, WorkspaceObject):", "m in distinguishing_mappings if m.initial_description_type == slipnet.string_position_category and m.initial_description_type != slipnet.bond_facet ] initial_description_types", "object_list += [letter] # if this object corresponds to another object in the", "coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def correspondence_strength_tester(codelet): correspondence = codelet.arguments[0] object_from_initial", "\" f\"unhappiness = {int(initial_unhappiness)}\" ) logging.info( f\"target : relevance = {target_relevance}, \" f\"unhappiness", "rhs = (weighted_strength1 + 
weighted_strength2) * random.random() logging.info(f\"{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}\")", "logging.info(f\"first_bond: {first_bond}\") if first_bond and not first_bond.direction_category: direction = None if not first_bond", "+= [structure.source.group] # try to break all objects for structure in break_objects: break_probability", "descriptors = object_from_initial.relevant_distinguishing_descriptors() slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode initial_descriptor = slipnode for mapping", "left_bond: if left_bond.left_object == previous: continue if left_bond.direction_category == group.direction_category: continue incompatible_bonds +=", "{destination}\") bond_descriptors = __get_descriptors(bond_facet, source, destination) source_descriptor, destination_descriptor = bond_descriptors logging.info(f\"source descriptor: {source_descriptor.name.upper()}\")", "objekt while leftmost.group and leftmost.group.bond_category == slipnet.sameness: leftmost = leftmost.group if leftmost.spans_string(): #", "break incompatible correspondences\") assert __fight(bond, 2.0, incompatible_correspondences, 3.0) for incompatible in incompatible_bonds: incompatible.break_the_structure()", "* (relevances + unhappinesses) initials = initial_relevance + initial_unhappiness if randomized > initials:", "destination): bond_facet = choose_bond_facet(source, destination) assert bond_facet return bond_facet def __get_descriptors(bond_facet, source, destination):", "coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def top_down_description_scout(codelet): description_type = codelet.arguments[0] chosen_object =", "range(0, len(distribution)): total += distribution[i] if total >= stop: return i + 1", "logging import random from . import formulas from . 
import temperature from .bond", "these objects incompatible_groups = group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups, group, \"Groups\", 1.0, 1.0) for incompatible", "mapping in correspondence.concept_mappings: mapping.initial_description_type.buffer = 100.0 mapping.initial_descriptor.buffer = 100.0 mapping.target_description_type.buffer = 100.0 mapping.target_descriptor.buffer", "search = True assert destination != source logging.info(f\"proposing group from {source} to {destination}\")", "logging.info(f\"no incompatible {name}\") return True def __slippability(concept_mappings): for mapping in concept_mappings: slippiness =", "group_builder(codelet): # update strength value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) equivalent", "or None assert letter_of_modified_string position -= 1 initial_ascii = ord(workspace.initial_string[position]) modified_ascii = ord(workspace.modified_string[position])", "structure.source.group == structure.destination.group: break_objects += [structure.source.group] # try to break all objects for", "= True destination = source search = True while search: search = False", "chosen_property = descriptions[i] coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def description_strength_tester(codelet): description =", "search = True while search: search = False if not source.left_bond: continue if", "object_list # use conceptual depth to choose a description value_list = [] for", "def group_strength_tester(codelet): # update strength value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group)", ") def correspondence_strength_tester(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target assert", "objects, bonds, group_category, direction, bond_facet, codelet ) # noinspection PyStringFormat 
def group_scout__whole_string(codelet): string", "= source.left_bond.facet direction = source.left_bond.direction_category source = source.left_bond.left_object search = True destination =", "1.0, 1.0, 1.0] elif density > 0.6: distribution = [2.0, 5.0, 150.0, 5.0,", "destination.right_bond.direction_category: continue if not bond_facet or bond_facet == destination.right_bond.facet: bond_facet = destination.right_bond.facet direction", "1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0] stop = sum(distribution) *", "bond list if len(group.object_list) > 1: previous = group.object_list[0] for objekt in group.object_list[1:]:", "category = source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category = slipnet.sameness logging.info(f\"proposing", "previous = objekt next_object = group.object_list[-1] for objekt in reversed(group.object_list[:-1]): right_bond = objekt.right_bond", "/ 100.0) assert random.random() <= probability # it is strong enough - post", "False if not source.left_bond: continue if source.left_bond.category != category: continue if source.left_bond.direction_category !=", "1.0, workspace.rule, 1.0) workspace.build_rule(rule) def __get_cut_off(density): if density > 0.8: distribution = [5.0,", "in workspace.slippages(): if mapping.initial_descriptor == slipnode: initial_descriptor = mapping.target_descriptor target_candidates = [] for", "be += ?? 
assert object_list # use conceptual depth to choose a description", "for node in object_list: depth = node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list += [value]", "mapping.initial_descriptor.buffer = 100.0 mapping.target_description_type.buffer = 100.0 mapping.target_descriptor.buffer = 100.0 coderack.new_codelet(\"correspondence-builder\", codelet, strength, correspondence)", "structure = random.choice(structures) __show_which_string_object_is_from(structure) break_objects = [structure] if isinstance(structure, Bond): if structure.source.group: if", "concept_mappings assert __slippability(concept_mappings) # find out if any are distinguishing distinguishing_mappings = [m", "codelet.arguments[0] assert description.object in workspace.objects if description.object.described(description.descriptor): description.description_type.buffer = 100.0 description.descriptor.buffer = 100.0", "if isinstance(o, Letter)] letter_of_initial_string = random.choice(letters) logging.info(f\"selected letter in initial string = {letter_of_initial_string}\")", "for objekt in string.objects: if objekt.leftmost: leftmost = objekt while leftmost.group and leftmost.group.bond_category", "Fizzle\") return False logging.info(f\"no incompatible {name}\") return True def __slippability(concept_mappings): for mapping in", "changed and o.get_description_type(letter) ] if not len(other_objects_of_same_letter): object_list += [letter] # if this", "object1 = group.object_list[i - 1] object2 = group.object_list[i] if not object1.right_bond: if group.direction_category", "bottom_up_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects ) object_from_target = choose_unmodified_object( \"inter_string_salience\", workspace.target.objects )", "objekt # if incompatible bonds exist - fight group.update_strength() assert __fight_incompatibles(incompatible_bonds, group, \"bonds\",", "in 
workspace.objects or bond.destination in workspace.objects for string_bond in bond.string.bonds: if bond.same_neighbours(string_bond) and", "False # start the actual codelets def breaker(): probability_of_fizzle = (100.0 - formulas.Temperature)", "None if not first_bond or first_bond.direction_category != direction: if mydirection == slipnet.right: first_bond", "temperature.clamp_time = coderack.codelets_run + 100 temperature.clamped = True formulas.Temperature = 100.0 def bottom_up_correspondence_scout(codelet):", "in the string object_list = [] position = changed.get_descriptor(slipnet.string_position_category) if position: object_list +=", "!= source logging.info(f\"proposing group from {source} to {destination}\") objects = [source] bonds =", "slipnet.identity: forward_bond = slipnet.sameness backward_bond = slipnet.sameness else: backward_bond = destination_descriptor.get_bond_category(source_descriptor) assert category", "m.distinguishing()] assert distinguishing_mappings # if both objects span the strings, check to see", "i in range(0, len(distribution)): total += distribution[i] if total >= stop: return i", "random.random() >= probability coderack.propose_single_letter_group(source, codelet) return direction = first_bond.direction_category search = True bond_facet", "[ o for o in workspace.modified.objects if isinstance(o, Letter) and o.left_index == position", "for objekt in reversed(group.object_list[:-1]): right_bond = objekt.right_bond if right_bond: if right_bond.right_object == next_object:", "groups # fight all groups containing these objects incompatible_groups = group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups,", "posible concept mappings concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) assert concept_mappings", "with no changes if not changed_objects: return coderack.propose_rule(None, None, 
None, None, codelet) changed", "[1.0, 1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0] stop = sum(distribution)", "2: relations = {0: slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor} relation = relations[diff] logging.info(f\"Relation", "correspondence.flip_target_object and not workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength() strength = correspondence.total_strength probability = formulas.temperature_adjusted_probability(strength /", "and target.leftmost or target.rightmost: # search for the incompatible bond incompatible_bond = correspondence.get_incompatible_bond()", "depth to choose a description value_list = [] for node in object_list: depth", "= 0.0 for i in range(0, len(distribution)): total += distribution[i] if total >=", "to break all objects for structure in break_objects: break_probability = formulas.temperature_adjusted_probability( structure.total_strength /", "string selected: {workspace.target}\") else: logging.info(f\"initial string selected: {workspace.initial}\") # find leftmost object &", "= formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability # activate some concepts for", "relation, codelet ) def rule_strength_tester(codelet): rule = codelet.arguments[0] rule.update_strength() probability = formulas.temperature_adjusted_probability(rule.total_strength /", "a structure at random structures = [ s for s in workspace.structures if", "target_object = changed.correspondence.object_from_target new_list = [] slippages = workspace.slippages() for node in object_list:", "assert destination != source objects = [source] bonds = [] while source !=", "if not changed_objects: return coderack.propose_rule(None, None, None, None, codelet) changed = changed_objects[-1] #", "its type in the string object_list = [] position = changed.get_descriptor(slipnet.string_position_category) if position:", "incompatible_rule = None if workspace.rule: if 
workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule = workspace.rule assert __structure_versus_structure( correspondence,", "not source.spans_string() if source.leftmost: mydirection = slipnet.right elif source.rightmost: mydirection = slipnet.left else:", "import formulas from . import temperature from .bond import Bond from .bond import", "this and the distingushing descriptors if changed.correspondence: target_object = changed.correspondence.object_from_target new_list = []", "# won against incompatible bond incompatible_group = target.group if incompatible_group: assert __structure_versus_structure( correspondence,", "= leftmost.right_bond.right_object objects += [leftmost] assert leftmost.rightmost # choose a random bond from", "import Group from .letter import Letter from .replacement import Replacement from .slipnet import", "group.group_category.get_related_node(slipnet.bond_category) facet = group.facet new_bond = Bond( source, destination, category, facet, source.get_descriptor(facet), destination.get_descriptor(facet),", "= True assert destination != source logging.info(f\"proposing group from {source} to {destination}\") objects", "n in descriptions] i = formulas.select_list_position(values) chosen_property = descriptions[i] coderack.propose_description( chosen_object, chosen_property.category(), chosen_property,", "else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): mydirection = slipnet.left", "+= [slipnet.right.activation] if not formulas.select_list_position(activations): mydirection = slipnet.left else: mydirection = slipnet.right if", "incompatible bond incompatible_group = target.group if incompatible_group: assert __structure_versus_structure( correspondence, 1.0, incompatible_group, 1.0", "if workspace.rule.build_translated_rule(): workspace.found_answer = True else: temperature.clamp_time = coderack.codelets_run + 100 
temperature.clamped =", "exists, activate concept mappings # and add new ones to the existing corr.", "/ 100.0 ) if formulas.coin_flip(break_probability): return for structure in break_objects: structure.break_the_structure() def bottom_up_description_scout(codelet):", "if there are no changed objects, propose a rule with no changes if", "destination = choose_directed_neighbor(source, direction) assert destination logging.info(f\"to object: {destination}\") bond_facet = __get_bond_facet(source, destination)", "slipnet.sameness: leftmost = leftmost.group if leftmost.spans_string(): # the object already spans the string", "choose_unmodified_object( \"inter_string_salience\", target_candidates ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get the posible concept", "{workspace.initial} for {type_name}\") source = choose_unmodified_object(\"intra_string_salience\", string.objects) return source def __get_bond_facet(source, destination): bond_facet", "target.rightmost: # search for the incompatible bond incompatible_bond = correspondence.get_incompatible_bond() if incompatible_bond: #", "category = slipnet.sameness logging.info(f\"proposing {category.name} bond \") coderack.propose_bond( source, destination, category, bond_facet, source_descriptor,", "else: mydirection = slipnet.right if mydirection == slipnet.left: first_bond = source.left_bond else: first_bond", "None logging.info(\"no relation found\") letter_of_initial_string.replacement = Replacement( letter_of_initial_string, letter_of_modified_string, relation ) if relation", "len(incompatibles)): return True for incompatible in incompatibles: if not __structure_versus_structure( structure, structure_weight, incompatible,", ".bond import Bond from .bond import possible_group_bonds from .coderack import coderack from .correspondence", "correspondence.get_incompatible_bond() if incompatible_bond: # bond found - fight against it assert __structure_versus_structure( 
correspondence,", "o in workspace.initial.objects if not o != changed and o.get_description_type(letter) ] if not", "formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability # it is strong enough -", "1.0, 1.0, 1.0, 1.0] elif density > 0.2: distribution = [1.0, 1.0, 2.0,", "mapping.initial_descriptor == slipnode: initial_descriptor = mapping.target_descriptor target_candidates = [] for objekt in workspace.target.objects:", "logging.info(f\"initial string selected: {workspace.initial} for {type_name}\") source = choose_unmodified_object(\"intra_string_salience\", string.objects) return source def", "len(other_objects_of_same_letter): object_list += [letter] # if this object corresponds to another object in", "flip_target_object, codelet, ) def important_object_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"relative_importance\", workspace.initial.objects ) descriptors =", "False if not destination.right_bond: continue if destination.right_bond.category != category: continue if destination.right_bond.direction_category !=", "random.random() <= probability # activate some concepts for mapping in correspondence.concept_mappings: mapping.initial_description_type.buffer =", "group from {source} to {destination}\") objects = [source] bonds = [] while source", "{relation.name}\") else: relation = None logging.info(\"no relation found\") letter_of_initial_string.replacement = Replacement( letter_of_initial_string, letter_of_modified_string,", "in group.object_list[1:]: left_bond = objekt.left_bond if left_bond: if left_bond.left_object == previous: continue if", "len(group.object_list)): object1 = group.object_list[i - 1] object2 = group.object_list[i] if not object1.right_bond: if", "f\"unhappiness = {int(initial_unhappiness)}\" ) logging.info( f\"target : relevance = {target_relevance}, \" f\"unhappiness =", ") for incompatible in incompatibles: incompatible.break_the_structure() # break 
incompatible group and bond if", "total >= stop: return i + 1 return len(distribution) def rule_translator(): assert workspace.rule", "__slippability(concept_mappings): for mapping in concept_mappings: slippiness = mapping.slippability() / 100.0 probability_of_slippage = formulas.temperature_adjusted_probability(slippiness)", "structure_weight, incompatibles, incompatible_weight): logging.info(f\"broke the {name}\") return True logging.info(f\"failed to break {name}: Fizzle\")", "leftmost coderack.propose_group( group.object_list, group.bond_list, group.group_category, group.direction_category, group.facet, codelet, ) return bonds = []", "[leftmost] while leftmost.right_bond: bonds += [leftmost.right_bond] leftmost = leftmost.right_bond.right_object objects += [leftmost] assert", "return len(distribution) def rule_translator(): assert workspace.rule if len(workspace.initial) == 1 and len(workspace.target) ==", "value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength() strength = group.total_strength probability", "group.facet new_bond = Bond( source, destination, category, facet, source.get_descriptor(facet), destination.get_descriptor(facet), ) new_bond.build_bond() group.bond_list", "group.activate_descriptions() equivalent.add_descriptions(group.descriptions) return # check to see if all objects are still there", "if total >= stop: return i + 1 return len(distribution) def rule_translator(): assert", "__show_which_string_object_is_from(group) equivalent = group.string.equivalent_group(group) if equivalent: logging.info(\"already exists...activate descriptors & fizzle\") group.activate_descriptions() equivalent.add_descriptions(group.descriptions)", "bond if they exist if incompatible_bond: incompatible_bond.break_the_structure() if incompatible_group: incompatible_group.break_the_structure() if incompatible_rule: workspace.break_rule()", "o in workspace.objects # check to see if bonds are there of the", ") 
logging.info( f\"target : relevance = {target_relevance}, \" f\"unhappiness = {int(target_unhappiness)}\" ) string", "= {strength} for {bond}\") assert formulas.coin_flip(probability) bond.facet.buffer = 100.0 bond.source_descriptor.buffer = 100.0 bond.destination_descriptor.buffer", "mapping.is_contained_by(existing.concept_mappings): existing.concept_mappings += [mapping] return incompatibles = correspondence.get_incompatible_correspondences() # fight against all correspondences", "[ m for m in distinguishing_mappings if m.initial_description_type == slipnet.string_position_category and m.initial_description_type !=", "{incompatible}\") return True def __fight_incompatibles( incompatibles, structure, name, structure_weight, incompatible_weight ): if len(incompatibles):", "return True return False # start the actual codelets def breaker(): probability_of_fizzle =", "coderack.new_codelet(\"description-builder\", codelet, strength) def description_builder(codelet): description = codelet.arguments[0] assert description.object in workspace.objects if", "activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0 if group.direction_category: group.direction_category.buffer = 100.0 coderack.new_codelet(\"group-builder\", codelet, strength)", "== slipnode: initial_descriptor = mapping.target_descriptor target_candidates = [] for objekt in workspace.target.objects: for", "destination): source_descriptor = source.get_descriptor(bond_facet) destination_descriptor = destination.get_descriptor(bond_facet) assert source_descriptor assert destination_descriptor return source_descriptor,", "direction = slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations):", "assert chosen_object __show_which_string_object_is_from(chosen_object) descriptions = chosen_object.get_possible_descriptions(description_type) assert descriptions values = [n.activation for n", 
"changed.correspondence: target_object = changed.correspondence.object_from_target new_list = [] slippages = workspace.slippages() for node in", "letmost object to \"successor\" or \"d\" object_list = [] if changed.replacement.relation: object_list +=", "first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond2\") else: logging.info(f\"first_bond2: {first_bond}\") if first_bond", "len(group.object_list) > 1: previous = group.object_list[0] for objekt in group.object_list[1:]: left_bond = objekt.left_bond", "bond_facet, codelet ) def group_strength_tester(codelet): # update strength value of the group group", "mapping.label.buffer = 100.0 if not mapping.is_contained_by(existing.concept_mappings): existing.concept_mappings += [mapping] return incompatibles = correspondence.get_incompatible_correspondences()", "object_from_initial.relevant_distinguishing_descriptors() slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode initial_descriptor = slipnode for mapping in workspace.slippages():", "formulas.coin_flip(probability_of_slippage): return True return False # start the actual codelets def breaker(): probability_of_fizzle", "object_from_initial = choose_unmodified_object( \"relative_importance\", workspace.initial.objects ) descriptors = object_from_initial.relevant_distinguishing_descriptors() slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors) assert", "assert random.random() <= probability # activate some concepts for mapping in correspondence.concept_mappings: mapping.initial_description_type.buffer", "right_bond: if right_bond.right_object == next_object: continue if right_bond.direction_category == group.direction_category: continue incompatible_bonds +=", "workspace.initial.objects if isinstance(o, Letter)] letter_of_initial_string = random.choice(letters) logging.info(f\"selected letter in initial string =", "for m in mappings if m.label != slipnet.opposite]) == 0 def 
__structure_versus_structure(structure1, weight1,", "slipnet.direction_category in initial_description_types and __all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation != 100.0 ): object_from_target = object_from_target.flipped_version()", "logging.info(f\"won fight with {incompatible}\") return True def __fight_incompatibles( incompatibles, structure, name, structure_weight, incompatible_weight", "= source.left_bond else: first_bond = source.right_bond if not first_bond or first_bond.category != category:", "value_list += [value] i = formulas.select_list_position(value_list) relation = object_list[i] coderack.propose_rule( slipnet.letter_category, descriptor, slipnet.letter,", "import Bond from .bond import possible_group_bonds from .coderack import coderack from .correspondence import", "len([m for m in mappings if m.label != slipnet.opposite]) == 0 def __structure_versus_structure(structure1,", "not first_bond or first_bond.category != category: # check the other side of object", "== forward_bond: coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) else: coderack.propose_bond(", "bond_facet, source, destination ) category = source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity:", "forward_bond: coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) else: coderack.propose_bond( destination,", "correspondence) def correspondence_builder(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target want_flip", "not mapping.is_contained_by(existing.concept_mappings): existing.concept_mappings += [mapping] return incompatibles = correspondence.get_incompatible_correspondences() # fight against all", "incompatible_bond = correspondence.get_incompatible_bond() 
if incompatible_bond: # bond found - fight against it assert", "group_category, direction, bond_facet, codelet ) def top_down_group_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source(", "workspace.rule: if workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule = workspace.rule assert __structure_versus_structure( correspondence, 1.0, incompatible_rule, 1.0 )", "2.0, 1.0, 1.0, 1.0] else: distribution = [1.0, 1.0, 1.0, 2.0, 5.0, 150.0,", "= correspondence.get_incompatible_correspondences() # fight against all correspondences if incompatibles: correspondence_spans = ( correspondence.object_from_initial.letter_span()", "if leftmost.spans_string(): # the object already spans the string - propose this object", "category: continue if destination.right_bond.direction_category != direction: if destination.right_bond.direction_category: continue if not bond_facet or", "in group with these bonds while search: search = False if not source.left_bond:", "= formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode initial_descriptor = slipnode for mapping in workspace.slippages(): if mapping.initial_descriptor", "= object1 destination = object2 else: source = object2 destination = object1 category", "methods common to the codelets def __show_which_string_object_is_from(structure): if not structure: return \"unstructured\" if", "workspace.initial relevances = initial_relevance + target_relevance unhappinesses = initial_unhappiness + target_unhappiness randomized =", "= formulas.similar_property_links(description.descriptor) assert sliplinks values = [ sliplink.degree_of_association() * sliplink.destination.activation for sliplink in", "destination_descriptor, codelet, ) else: coderack.propose_bond( destination, source, category, bond_facet, destination_descriptor, source_descriptor, codelet, )", "= 100.0 bond.category.buffer = 100.0 logging.info(\"already exists: activate descriptors & Fizzle\") 
return incompatible_bonds", "group.object_list[i - 1] object2 = group.object_list[i] if not object1.right_bond: if group.direction_category == slipnet.right:", "# if there is an incompatible bond then fight against it initial =", "correspondences\") assert __fight(bond, 2.0, incompatible_correspondences, 3.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() for incompatible", "codelet, strength, correspondence) def correspondence_builder(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target =", "against it initial = correspondence.object_from_initial target = correspondence.object_from_target if initial.leftmost or initial.rightmost and", "initial_ascii - modified_ascii if abs(diff) < 2: relations = {0: slipnet.sameness, -1: slipnet.successor,", "from .correspondence import Correspondence from .group import Group from .letter import Letter from", "{initial_relevance}, \" f\"unhappiness = {int(initial_unhappiness)}\" ) logging.info( f\"target : relevance = {target_relevance}, \"", "for description in objekt.relevant_descriptions(): if description.descriptor == initial_descriptor: target_candidates += [objekt] assert target_candidates", "= len(workspace.initial) + len(workspace.target) - 2 bond_density = number_of_bonds / nearly_total_length if bond_density", "incompatible.break_the_structure() # break incompatible group and bond if they exist if incompatible_bond: incompatible_bond.break_the_structure()", "or bond_facet == destination.right_bond.facet: bond_facet = destination.right_bond.facet direction = source.right_bond.direction_category destination = destination.right_bond.right_object", "or first_bond.category != category: if category == slipnet.sameness and isinstance(source, Letter): group =", "= choose_unmodified_object(\"intra_string_salience\", string.objects) return source def __get_bond_facet(source, destination): bond_facet = choose_bond_facet(source, 
destination) assert", "object_from_target.spans_string() # get the posible concept mappings concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(),", "= None # if there is an incompatible bond then fight against it", "+= [changed.replacement.relation] object_list += [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] # use conceptual depth to choose", "bonds search = True destination = source while search: search = False if", "break_probability = formulas.temperature_adjusted_probability( structure.total_strength / 100.0 ) if formulas.coin_flip(break_probability): return for structure in", "leftmost = leftmost.group if leftmost.spans_string(): # the object already spans the string -", "mappings concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) assert concept_mappings assert __slippability(concept_mappings)", "if formulas.coin_flip(break_probability): return for structure in break_objects: structure.break_the_structure() def bottom_up_description_scout(codelet): chosen_object = choose_unmodified_object(\"total_salience\",", ") assert concept_mappings assert __slippability(concept_mappings) # find out if any are distinguishing distinguishing_mappings", "destination != source objects = [source] bonds = [] while source != destination:", "structure.source.group: if structure.source.group == structure.destination.group: break_objects += [structure.source.group] # try to break all", "!= category: continue if destination.right_bond.direction_category != direction: if destination.right_bond.direction_category: continue if not bond_facet", "& fizzle\") group.activate_descriptions() equivalent.add_descriptions(group.descriptions) return # check to see if all objects are", "or bond.right_object.rightmost: if bond.direction_category: 
incompatible_correspondences = bond.get_incompatible_correspondences() if incompatible_correspondences: logging.info(\"trying to break incompatible", "correspondence_spans, incompatible, incompatible_spans ) incompatible_bond = None incompatible_group = None # if there", "from .workspace_formulas import choose_directed_neighbor from .workspace_formulas import choose_neighbour from .workspace_formulas import choose_unmodified_object from", "mapping in concept_mappings: slippiness = mapping.slippability() / 100.0 probability_of_slippage = formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage):", "= None assert first_bond assert first_bond.direction_category == direction logging.info(f\"possible group: {first_bond}\") category =", "= group.string.equivalent_group(group) if equivalent: logging.info(\"already exists...activate descriptors & fizzle\") group.activate_descriptions() equivalent.add_descriptions(group.descriptions) return #", "random.random() <= probability coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength, rule) def replacement_finder(): # choose random letter", "if not o != changed and o.get_description_type(letter) ] if not len(other_objects_of_same_letter): object_list +=", "source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def rule_scout(codelet): assert workspace.number_of_unreplaced_objects() ==", "codelets def __show_which_string_object_is_from(structure): if not structure: return \"unstructured\" if isinstance(structure, WorkspaceObject): return \"target\"", "100.0 if group.direction_category: group.direction_category.buffer = 100.0 coderack.new_codelet(\"group-builder\", codelet, strength) def group_builder(codelet): # update", "assert __slippability(concept_mappings) # find out if any are distinguishing distinguishing_mappings = [m for", "string = workspace.initial if random.random() > 0.5: string = workspace.target logging.info(f\"target 
string selected:", "against incompatible bond incompatible_group = target.group if incompatible_group: assert __structure_versus_structure( correspondence, 1.0, incompatible_group,", "1.0, 1.0] stop = sum(distribution) * random.random() total = 0.0 for i in", "relations = {0: slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor} relation = relations[diff] logging.info(f\"Relation found:", "it initial = correspondence.object_from_initial target = correspondence.object_from_target if initial.leftmost or initial.rightmost and target.leftmost", "correspondences incompatible_correspondences = [] if bond.left_object.leftmost or bond.right_object.rightmost: if bond.direction_category: incompatible_correspondences = bond.get_incompatible_correspondences()", "= more_letters and more_letters[0] or None assert letter_of_modified_string position -= 1 initial_ascii =", "fight with {incompatible}\") return True def __fight_incompatibles( incompatibles, structure, name, structure_weight, incompatible_weight ):", "def top_down_bond_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"bond\" ) destination", "for m in opposites] flip_target_object = False if ( object_from_initial.spans_string() and object_from_target.spans_string() and", "incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span() ) assert __structure_versus_structure( correspondence, correspondence_spans, incompatible, incompatible_spans ) incompatible_bond =", "= leftmost coderack.propose_group( group.object_list, group.bond_list, group.group_category, group.direction_category, group.facet, codelet, ) return bonds =", "sliplinks ] i = formulas.select_list_position(values) chosen = sliplinks[i] chosen_property = chosen.destination coderack.propose_description( chosen_object,", "there are no changed objects, propose a rule with no changes if not", "o != changed and 
o.get_description_type(letter) ] if not len(other_objects_of_same_letter): object_list += [letter] #", "return True logging.info(f\"failed to break {name}: Fizzle\") return False logging.info(f\"no incompatible {name}\") return", "group.group_category, group.direction_category, group.facet, codelet, ) return bonds = [] objects = [leftmost] while", "__show_which_string_object_is_from(bond) bond.update_strength() assert bond.source in workspace.objects or bond.destination in workspace.objects for string_bond in", "choose_unmodified_object(\"intra_string_salience\", workspace.objects) __show_which_string_object_is_from(source) destination = choose_neighbour(source) assert destination logging.info(f\"destination: {destination}\") bond_facet = __get_bond_facet(source,", "source.spans_string() if source.leftmost: direction = slipnet.right elif source.rightmost: direction = slipnet.left else: activations", "object2 = group.object_list[i] if not object1.right_bond: if group.direction_category == slipnet.right: source = object1", "{0: slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor} relation = relations[diff] logging.info(f\"Relation found: {relation.name}\") else:", "chosen_property.category(), chosen_property, codelet ) def description_strength_tester(codelet): description = codelet.arguments[0] description.descriptor.buffer = 100.0 description.update_strength()", "if it is the only one of its type in the string object_list", "100.0 mapping.target_description_type.buffer = 100.0 mapping.target_descriptor.buffer = 100.0 coderack.new_codelet(\"correspondence-builder\", codelet, strength, correspondence) def correspondence_builder(codelet):", "total += distribution[i] if total >= stop: return i + 1 return len(distribution)", "position = letter_of_initial_string.left_index more_letters = [ o for o in workspace.modified.objects if isinstance(o,", "in object_list: depth = node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list += 
[value] i =", "isinstance(structure, WorkspaceObject): return \"target\" if structure.string == workspace.initial: return \"initial\" return \"other\" def", "stop: return i + 1 return len(distribution) def rule_translator(): assert workspace.rule if len(workspace.initial)", "codelet ) def rule_strength_tester(codelet): rule = codelet.arguments[0] rule.update_strength() probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0)", "!= category: if category == slipnet.sameness and isinstance(source, Letter): group = Group( source.string,", "if letter_of_initial_string.replacement: logging.info( f\"Replacement already found for {letter_of_initial_string}, so fizzling\" ) return position", "chosen_bond.facet bonds = possible_group_bonds(category, direction_category, bond_facet, bonds) assert bonds group_category = category.get_related_node(slipnet.group_category) coderack.propose_group(", "= source_descriptor.get_bond_category(destination_descriptor) if forward_bond == slipnet.identity: forward_bond = slipnet.sameness backward_bond = slipnet.sameness else:", "bond found - fight against it assert __structure_versus_structure( correspondence, 3.0, incompatible_bond, 2.0 )", "temperature from .bond import Bond from .bond import possible_group_bonds from .coderack import coderack", "logging.info(\"already exists: activate descriptors & Fizzle\") return incompatible_bonds = bond.get_incompatible_bonds() logging.info(f\"number of incompatible_bonds:", "object already spans the string - propose this object group = leftmost coderack.propose_group(", "objekt next_object = group.object_list[-1] for objekt in reversed(group.object_list[:-1]): right_bond = objekt.right_bond if right_bond:", "relation = object_list[i] coderack.propose_rule( slipnet.letter_category, descriptor, slipnet.letter, relation, codelet ) def rule_strength_tester(codelet): rule", "target_relevance = relevance_method(workspace.target, slipnode) initial_unhappiness = 
workspace.initial.intra_string_unhappiness target_unhappiness = workspace.target.intra_string_unhappiness logging.info( f\"initial :", "object_from_target in workspace.objects or correspondence.flip_target_object and not workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength() strength = correspondence.total_strength", "workspace.target.objects ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get the posible concept mappings concept_mappings", "= codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"bond\" ) destination = choose_directed_neighbor(source, direction)", "= possible_group_bonds(category, direction_category, bond_facet, bonds) assert bonds group_category = category.get_related_node(slipnet.group_category) coderack.propose_group( objects, bonds,", "mapping.target_descriptor target_candidates = [] for objekt in workspace.target.objects: for description in objekt.relevant_descriptions(): if", "if group.direction_category: group.direction_category.buffer = 100.0 coderack.new_codelet(\"group-builder\", codelet, strength) def group_builder(codelet): # update strength", "1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0] stop = sum(distribution) * random.random()", "if not mapping.is_contained_by(existing.concept_mappings): existing.concept_mappings += [mapping] return incompatibles = correspondence.get_incompatible_correspondences() # fight against", "the object already spans the string - propose this object group = leftmost", "formulas.local_bond_category_relevance, \"bond\" ) destination = choose_neighbour(source) logging.info(f\"source: {source}, destination: {destination}\") assert destination bond_facet", "bond_facet = __get_bond_facet(source, destination) logging.info(f\"chosen bond facet: {bond_facet.get_name()}\") logging.info(f\"Source: {source}, destination: {destination}\") bond_descriptors", "breaker(): 
probability_of_fizzle = (100.0 - formulas.Temperature) / 100.0 assert not formulas.coin_flip(probability_of_fizzle) # choose", "in workspace.objects for string_bond in bond.string.bonds: if bond.same_neighbours(string_bond) and bond.same_categories(string_bond): if bond.direction_category: bond.direction_category.buffer", "logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds, bond, \"bonds\", 1.0, 1.0) incompatible_groups = bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups, bond,", "100.0) assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet, strength) def description_builder(codelet): description = codelet.arguments[0] assert description.object", "probability # activate some concepts for mapping in correspondence.concept_mappings: mapping.initial_description_type.buffer = 100.0 mapping.initial_descriptor.buffer", "__show_which_string_object_is_from(structure): if not structure: return \"unstructured\" if isinstance(structure, WorkspaceObject): return \"target\" if structure.string", "correspondence.reflexive(): # if the correspondence exists, activate concept mappings # and add new", "= __get_cut_off(bond_density) * 10.0 assert cutoff >= formulas.actual_temperature if workspace.rule.build_translated_rule(): workspace.found_answer = True", "# the object already spans the string - propose this object group =", "in string.objects: if objekt.leftmost: leftmost = objekt while leftmost.group and leftmost.group.bond_category == slipnet.sameness:", "sliplinks = formulas.similar_property_links(description.descriptor) assert sliplinks values = [ sliplink.degree_of_association() * sliplink.destination.activation for sliplink", "= 100.0 logging.info(\"succeeded: posting bond-builder\") coderack.new_codelet(\"bond-builder\", codelet, strength) def bond_builder(codelet): bond = codelet.arguments[0]", "from .workspace_formulas import choose_unmodified_object 
from .workspace_formulas import workspace from .workspace_object import WorkspaceObject #", "facet, source.get_descriptor(facet), destination.get_descriptor(facet), ) new_bond.build_bond() group.bond_list += [object1.right_bond] for incompatible in incompatible_groups: incompatible.break_the_structure()", "__get_scout_source( direction, formulas.local_direction_category_relevance, \"bond\" ) destination = choose_directed_neighbor(source, direction) assert destination logging.info(f\"to object:", "not formulas.select_list_position(activations): direction = slipnet.left else: direction = slipnet.right if direction == slipnet.left:", "if isinstance(o, Letter) and o.left_index == position ] letter_of_modified_string = more_letters and more_letters[0]", "incompatible bond list if len(group.object_list) > 1: previous = group.object_list[0] for objekt in", "= 1.0 else: number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds) nearly_total_length = len(workspace.initial) + len(workspace.target)", "destination_descriptor = __get_descriptors( bond_facet, source, destination ) category = source_descriptor.get_bond_category(destination_descriptor) assert category if", "def correspondence_strength_tester(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target assert object_from_initial", "break all objects for structure in break_objects: break_probability = formulas.temperature_adjusted_probability( structure.total_strength / 100.0", "facet: {bond_facet.get_name()}\") logging.info(f\"Source: {source}, destination: {destination}\") bond_descriptors = __get_descriptors(bond_facet, source, destination) source_descriptor, destination_descriptor", "# start the actual codelets def breaker(): probability_of_fizzle = (100.0 - formulas.Temperature) /", "__slippability(concept_mappings) # find out if any are distinguishing distinguishing_mappings = [m for m", 
"object1.right_bond: if group.direction_category == slipnet.right: source = object1 destination = object2 else: source", "True while search: search = False if not source.left_bond: continue if source.left_bond.category !=", "== slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond: logging.info(\"no", "\") coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def rule_scout(codelet): assert", "True bond_facet = None # find leftmost object in group with these bonds", "letter in initial string = {letter_of_initial_string}\") if letter_of_initial_string.replacement: logging.info( f\"Replacement already found for", "correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target want_flip = correspondence.flip_target_object if", "random.choice(bonds) category = chosen_bond.category direction_category = chosen_bond.direction_category bond_facet = chosen_bond.facet bonds = possible_group_bonds(category,", "not target_in_objects and (not (want_flip and target_not_flipped)) ) if correspondence.reflexive(): # if the", "description_builder(codelet): description = codelet.arguments[0] assert description.object in workspace.objects if description.object.described(description.descriptor): description.description_type.buffer = 100.0", "<= probability # it is strong enough - post builder & activate nodes", "want_flip = correspondence.flip_target_object if want_flip: flipper = object_from_target.flipped_version() target_not_flipped = not workspace.target.equivalent_group(flipper) else:", "if bond.same_neighbours(string_bond) and bond.same_categories(string_bond): if bond.direction_category: bond.direction_category.buffer = 100.0 bond.category.buffer = 100.0 logging.info(\"already", "object_list = [] if changed.replacement.relation: object_list += [changed.replacement.relation] 
object_list += [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ]", "object: {destination}\") bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination", "__show_which_string_object_is_from(source) destination = choose_neighbour(source) assert destination logging.info(f\"destination: {destination}\") bond_facet = __get_bond_facet(source, destination) logging.info(f\"chosen", "incompatible_weight): if not (incompatibles and len(incompatibles)): return True for incompatible in incompatibles: if", ") assert __structure_versus_structure( correspondence, correspondence_spans, incompatible, incompatible_spans ) incompatible_bond = None incompatible_group =", "isinstance(source, Letter): group = Group( source.string, slipnet.sameness_group, None, slipnet.letter_category, [source], [], ) probability", "concept_mappings, flip_target_object, codelet, ) def correspondence_strength_tester(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target", "source.left_bond.facet direction = source.left_bond.direction_category source = source.left_bond.left_object search = True # find rightmost", "source, category, bond_facet, destination_descriptor, source_descriptor, codelet, ) def top_down_bond_scout__direction(codelet): direction = codelet.arguments[0] source", "{destination_descriptor.name.upper()}\") category = source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category = slipnet.sameness", "import choose_directed_neighbor from .workspace_formulas import choose_neighbour from .workspace_formulas import choose_unmodified_object from .workspace_formulas import", "> 1.0: bond_density = 1.0 cutoff = __get_cut_off(bond_density) * 10.0 assert cutoff >=", "for mapping in correspondence.concept_mappings: if mapping.label: mapping.label.buffer = 
100.0 if not mapping.is_contained_by(existing.concept_mappings): existing.concept_mappings", "incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building bond {bond}\") bond.build_bond() # pylint: disable=too-many-branches # pylint: disable=too-many-statements def", "# fight incompatible groups # fight all groups containing these objects incompatible_groups =", "formulas.Temperature = 100.0 def bottom_up_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects ) object_from_target =", "1.0] elif density > 0.4: distribution = [1.0, 2.0, 5.0, 150.0, 5.0, 2.0,", "incompatible_bonds = bond.get_incompatible_bonds() logging.info(f\"number of incompatible_bonds: {len(incompatible_bonds)}\") if len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds, bond,", "= changed.correspondence.object_from_target new_list = [] slippages = workspace.slippages() for node in object_list: node", "rightmost object in group with these bonds search = True destination = source", "== slipnet.right: source = object1 destination = object2 else: source = object2 destination", "return rule.update_strength() assert rule.total_strength # fight against other rules if workspace.rule: assert __structure_versus_structure(rule,", "= source.left_bond.direction_category source = source.left_bond.left_object search = True # find rightmost object in", "else: backward_bond = destination_descriptor.get_bond_category(source_descriptor) assert category in [forward_bond, backward_bond] if category == forward_bond:", "= slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): direction", "fight against it assert __structure_versus_structure( correspondence, 3.0, incompatible_bond, 2.0 ) # won against", "{int(target_unhappiness)}\" ) string = workspace.initial 
relevances = initial_relevance + target_relevance unhappinesses = initial_unhappiness", "check the other side of object if direction == slipnet.right: first_bond = source.left_bond", "source.right_bond if not first_bond: logging.info(\"no first_bond2\") else: logging.info(f\"first_bond2: {first_bond}\") if first_bond and not", "> 1: previous = group.object_list[0] for objekt in group.object_list[1:]: left_bond = objekt.left_bond if", "incompatible_bonds += [left_bond] previous = objekt next_object = group.object_list[-1] for objekt in reversed(group.object_list[:-1]):", "\"d\" object_list = [] if changed.replacement.relation: object_list += [changed.replacement.relation] object_list += [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category)", "relation = None logging.info(\"no relation found\") letter_of_initial_string.replacement = Replacement( letter_of_initial_string, letter_of_modified_string, relation )", "structure: return \"unstructured\" if isinstance(structure, WorkspaceObject): return \"target\" if structure.string == workspace.initial: return", "source.right_bond.direction_category destination = destination.right_bond.right_object search = True assert destination != source objects =", "assert cutoff >= formulas.actual_temperature if workspace.rule.build_translated_rule(): workspace.found_answer = True else: temperature.clamp_time = coderack.codelets_run", "len(incompatibles): if __fight(structure, structure_weight, incompatibles, incompatible_weight): logging.info(f\"broke the {name}\") return True logging.info(f\"failed to", "chosen_object.get_possible_descriptions(description_type) assert descriptions values = [n.activation for n in descriptions] i = formulas.select_list_position(values)", "first_bond2\") else: logging.info(f\"first_bond2: {first_bond}\") if first_bond and not first_bond.direction_category: direction = None assert", "100.0 coderack.new_codelet(\"correspondence-builder\", codelet, strength, correspondence) def 
correspondence_builder(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial", "= node.apply_slippages(slippages) if target_object.described(node): if target_object.distinguishing_descriptor(node): new_list += [node] object_list = new_list #", "for {type_name}\") source = choose_unmodified_object(\"intra_string_salience\", string.objects) return source def __get_bond_facet(source, destination): bond_facet =", "logging.info(f\"lost fight with {incompatible}\") return False logging.info(f\"won fight with {incompatible}\") return True def", "if the correspondence exists, activate concept mappings # and add new ones to", "{first_bond}\") category = first_bond.category assert category group_category = category.get_related_node(slipnet.group_category) logging.info(f\"trying from {source} to", "if bonds are there of the same direction incompatible_bonds = [] # incompatible", "= formulas.choose_relevant_description_by_activation(chosen_object) assert description sliplinks = formulas.similar_property_links(description.descriptor) assert sliplinks values = [ sliplink.degree_of_association()", "== slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond: logging.info(\"no", "= changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter = [ o for o in workspace.initial.objects if not o", "start the actual codelets def breaker(): probability_of_fizzle = (100.0 - formulas.Temperature) / 100.0", "logging.info(f\"{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}\") return weighted_strength1 > rhs def __fight(structure, structure_weight,", "flip_target_object = False if ( object_from_initial.spans_string() and object_from_target.spans_string() and slipnet.direction_category in initial_description_types and", "mappings if m.label != slipnet.opposite]) == 0 def __structure_versus_structure(structure1, weight1, structure2, weight2): structure1.update_strength()", 
"WorkspaceObject # some methods common to the codelets def __show_which_string_object_is_from(structure): if not structure:", "= changed_objects[-1] # generate a list of distinguishing descriptions for the first object", "slipnode initial_descriptor = slipnode for mapping in workspace.slippages(): if mapping.initial_descriptor == slipnode: initial_descriptor", "concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) assert concept_mappings assert __slippability(concept_mappings) #", "- propose this object group = leftmost coderack.propose_group( group.object_list, group.bond_list, group.group_category, group.direction_category, group.facet,", "{first_bond}\") if first_bond and not first_bond.direction_category: direction = None assert first_bond assert first_bond.direction_category", "concept_mappings if m.distinguishing()] assert distinguishing_mappings # if both objects span the strings, check", "[forward_bond, backward_bond] if category == forward_bond: coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor,", "== slipnet.identity: category = slipnet.sameness coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet,", "= formulas.temperature_adjusted_value( structure2.total_strength * weight2 ) rhs = (weighted_strength1 + weighted_strength2) * random.random()", "common to the codelets def __show_which_string_object_is_from(structure): if not structure: return \"unstructured\" if isinstance(structure,", "import random from . import formulas from . 
import temperature from .bond import", "formulas.local_direction_category_relevance, \"direction\" ) logging.info(f\"source chosen = {source}\") assert not source.spans_string() if source.leftmost: mydirection", "not __structure_versus_structure( structure, structure_weight, incompatible, incompatible_weight ): logging.info(f\"lost fight with {incompatible}\") return False", "bonds += [source.right_bond] objects += [source.right_bond.right_object] source = source.right_bond.right_object coderack.propose_group( objects, bonds, group_category,", "direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"bond\" ) destination = choose_directed_neighbor(source,", "a list of distinguishing descriptions for the first object # ie. string-position (left-,right-most,middle", "incompatible_groups = bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups, bond, \"groups\", 1.0, 1.0) # fight all incompatible", "incompatible_weight ): logging.info(f\"lost fight with {incompatible}\") return False logging.info(f\"won fight with {incompatible}\") return", "distribution = [1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0] elif", "if not len(other_objects_of_same_letter): object_list += [letter] # if this object corresponds to another", "first_bond: logging.info(\"no first_bond\") else: logging.info(f\"first_bond: {first_bond}\") if first_bond and not first_bond.direction_category: direction =", "Letter): group = Group( source.string, slipnet.sameness_group, None, slipnet.letter_category, [source], [], ) probability =", "# create new bonds group.bond_list = [] for i in range(1, len(group.object_list)): object1", "= 100.0 description.update_strength() strength = description.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert formulas.coin_flip(probability)", "workspace.objects assert ( object_from_target in workspace.objects or 
correspondence.flip_target_object and not workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength()", "possible_group_bonds from .coderack import coderack from .correspondence import Correspondence from .group import Group", ") if formulas.coin_flip(break_probability): return for structure in break_objects: structure.break_the_structure() def bottom_up_description_scout(codelet): chosen_object =", "descriptor, slipnet.letter, relation, codelet ) def rule_strength_tester(codelet): rule = codelet.arguments[0] rule.update_strength() probability =", "else: logging.info(f\"initial string selected: {workspace.initial} for {type_name}\") source = choose_unmodified_object(\"intra_string_salience\", string.objects) return source", "group.activate_descriptions() logging.info(\"building group\") def rule_builder(codelet): rule = codelet.arguments[0] if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return rule.update_strength()", "= the union of this and the distingushing descriptors if changed.correspondence: target_object =", "if left_bond: if left_bond.left_object == previous: continue if left_bond.direction_category == group.direction_category: continue incompatible_bonds", "def __get_descriptors(bond_facet, source, destination): source_descriptor = source.get_descriptor(bond_facet) destination_descriptor = destination.get_descriptor(bond_facet) assert source_descriptor assert", "logging.info(f\"source chosen = {source}\") assert not source.spans_string() if source.leftmost: mydirection = slipnet.right elif", "it belongs leftmost = None for objekt in string.objects: if objekt.leftmost: leftmost =", "direction incompatible_bonds = [] # incompatible bond list if len(group.object_list) > 1: previous", "concept_mappings, flip_target_object, codelet, ) def important_object_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"relative_importance\", workspace.initial.objects ) 
descriptors", "number_of_bonds / nearly_total_length if bond_density > 1.0: bond_density = 1.0 cutoff = __get_cut_off(bond_density)", "continue incompatible_bonds += [left_bond] previous = objekt next_object = group.object_list[-1] for objekt in", "= [] # incompatible bond list if len(group.object_list) > 1: previous = group.object_list[0]", "(Group, Bond, Correspondence)) ] assert structures structure = random.choice(structures) __show_which_string_object_is_from(structure) break_objects = [structure]", "if bond.direction_category: incompatible_correspondences = bond.get_incompatible_correspondences() if incompatible_correspondences: logging.info(\"trying to break incompatible correspondences\") assert", "coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet ) def top_down_group_scout__direction(codelet): direction = codelet.arguments[0]", "[source.right_bond.right_object] source = source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet ) def", "bond_strength_tester(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() strength = bond.total_strength probability = formulas.temperature_adjusted_probability(strength /", "destination = source while search: search = False if not destination.right_bond: continue if", "group = Group( source.string, slipnet.sameness_group, None, slipnet.letter_category, [source], [], ) probability = group.single_letter_group_probability()", "bonds while search: search = False if not source.left_bond: continue if source.left_bond.category !=", "object_from_target = object_from_target.flipped_version() concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) flip_target_object =", "ord(workspace.initial_string[position]) modified_ascii = ord(workspace.modified_string[position]) diff = 
initial_ascii - modified_ascii if abs(diff) < 2:", "destination.get_descriptor(facet), ) new_bond.build_bond() group.bond_list += [object1.right_bond] for incompatible in incompatible_groups: incompatible.break_the_structure() group.build_group() group.activate_descriptions()", "assert random.random() >= probability coderack.propose_single_letter_group(source, codelet) return direction = first_bond.direction_category search = True", "workspace.rule assert __structure_versus_structure( correspondence, 1.0, incompatible_rule, 1.0 ) for incompatible in incompatibles: incompatible.break_the_structure()", "> {rhs}: {weighted_strength1 > rhs}\") return weighted_strength1 > rhs def __fight(structure, structure_weight, incompatibles,", "0.2: distribution = [1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0]", "for m in distinguishing_mappings if m.initial_description_type == slipnet.string_position_category and m.initial_description_type != slipnet.bond_facet ]", "5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.4:", "] initial_description_types = [m.initial_description_type for m in opposites] flip_target_object = False if (", "False logging.info(f\"won fight with {incompatible}\") return True def __fight_incompatibles( incompatibles, structure, name, structure_weight,", "= choose_unmodified_object( \"inter_string_salience\", target_candidates ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get the posible", ") return bonds = [] objects = [leftmost] while leftmost.right_bond: bonds += [leftmost.right_bond]", "if m.initial_description_type == slipnet.string_position_category and m.initial_description_type != slipnet.bond_facet ] initial_description_types = [m.initial_description_type for", "# if there are no changed objects, propose a rule with no changes", "def rule_scout(codelet): assert workspace.number_of_unreplaced_objects() == 0 changed_objects = [o for o in workspace.initial.objects", "= False if not 
destination.right_bond: continue if destination.right_bond.category != category: continue if destination.right_bond.direction_category", "destination_descriptor def __all_opposite_mappings(mappings): return len([m for m in mappings if m.label != slipnet.opposite])", "not first_bond: logging.info(\"no first_bond\") else: logging.info(f\"first_bond: {first_bond}\") if first_bond and not first_bond.direction_category: direction", "find out if any are distinguishing distinguishing_mappings = [m for m in concept_mappings", "[ o for o in workspace.initial.objects if not o != changed and o.get_description_type(letter)", "if isinstance(structure, Bond): if structure.source.group: if structure.source.group == structure.destination.group: break_objects += [structure.source.group] #", "f\"initial : relevance = {initial_relevance}, \" f\"unhappiness = {int(initial_unhappiness)}\" ) logging.info( f\"target :", "object_from_initial = choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects ) object_from_target = choose_unmodified_object( \"inter_string_salience\", workspace.target.objects ) assert", "= [] if bond.left_object.leftmost or bond.right_object.rightmost: if bond.direction_category: incompatible_correspondences = bond.get_incompatible_correspondences() if incompatible_correspondences:", "slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): mydirection =", "bonds exist - fight group.update_strength() assert __fight_incompatibles(incompatible_bonds, group, \"bonds\", 1.0, 1.0) # fight", "and target_not_flipped)) ) if correspondence.reflexive(): # if the correspondence exists, activate concept mappings", "bond_facet == destination.right_bond.facet: bond_facet = destination.right_bond.facet direction = source.right_bond.direction_category destination = destination.right_bond.right_object search", "= True while search: search = False if not 
source.left_bond: continue if source.left_bond.category", "another object in the workspace # object_list = the union of this and", "source.left_bond.direction_category != direction: if source.left_bond.direction_category: continue if not bond_facet or bond_facet == source.left_bond.facet:", "Bond( source, destination, category, facet, source.get_descriptor(facet), destination.get_descriptor(facet), ) new_bond.build_bond() group.bond_list += [object1.right_bond] for", "logging.info(f\"destination: {destination}\") bond_facet = __get_bond_facet(source, destination) logging.info(f\"chosen bond facet: {bond_facet.get_name()}\") logging.info(f\"Source: {source}, destination:", "create new bonds group.bond_list = [] for i in range(1, len(group.object_list)): object1 =", "destination_descriptor = __get_descriptors( bond_facet, source, destination ) forward_bond = source_descriptor.get_bond_category(destination_descriptor) if forward_bond ==", "source = source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet ) def top_down_group_scout__direction(codelet):", "rule with no changes if not changed_objects: return coderack.propose_rule(None, None, None, None, codelet)", "5.0, 150.0, 5.0, 2.0, 1.0, 1.0] stop = sum(distribution) * random.random() total =", "slippages = workspace.slippages() for node in object_list: node = node.apply_slippages(slippages) if target_object.described(node): if", "if bond.left_object.leftmost or bond.right_object.rightmost: if bond.direction_category: incompatible_correspondences = bond.get_incompatible_correspondences() if incompatible_correspondences: logging.info(\"trying to", "return False logging.info(f\"won fight with {incompatible}\") return True def __fight_incompatibles( incompatibles, structure, name,", "150.0, 5.0, 2.0, 1.0, 1.0, 1.0] else: distribution = [1.0, 1.0, 1.0, 2.0,", "node in object_list: depth = node.conceptual_depth value = formulas.temperature_adjusted_value(depth) 
value_list += [value] i", "workspace.slippages() for node in object_list: node = node.apply_slippages(slippages) if target_object.described(node): if target_object.distinguishing_descriptor(node): new_list", "+ weighted_strength2) * random.random() logging.info(f\"{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}\") return weighted_strength1 >", "find leftmost object & the highest group to which it belongs leftmost =", "= codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength() strength = group.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert", "chosen_bond.category direction_category = chosen_bond.direction_category bond_facet = chosen_bond.facet bonds = possible_group_bonds(category, direction_category, bond_facet, bonds)", "return source_descriptor, destination_descriptor def __all_opposite_mappings(mappings): return len([m for m in mappings if m.label", "destination_descriptor.get_bond_category(source_descriptor) assert category in [forward_bond, backward_bond] if category == forward_bond: coderack.propose_bond( source, destination,", "[mapping] return incompatibles = correspondence.get_incompatible_correspondences() # fight against all correspondences if incompatibles: correspondence_spans", "= 100.0 if not mapping.is_contained_by(existing.concept_mappings): existing.concept_mappings += [mapping] return incompatibles = correspondence.get_incompatible_correspondences() #", ") # won against incompatible bond incompatible_group = target.group if incompatible_group: assert __structure_versus_structure(", "choose a description value_list = [] for node in object_list: depth = node.conceptual_depth", "first_bond = source.right_bond if not first_bond or first_bond.category != category: # check the", "= len(workspace.initial.bonds) + len(workspace.target.bonds) nearly_total_length = len(workspace.initial) + len(workspace.target) - 2 bond_density =", "rule_scout(codelet): assert 
workspace.number_of_unreplaced_objects() == 0 changed_objects = [o for o in workspace.initial.objects if", "new_bond = Bond( source, destination, category, facet, source.get_descriptor(facet), destination.get_descriptor(facet), ) new_bond.build_bond() group.bond_list +=", "{category.name} bond \") coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def", "100.0 logging.info(\"already exists: activate descriptors & Fizzle\") return incompatible_bonds = bond.get_incompatible_bonds() logging.info(f\"number of", "incompatible_groups: incompatible.break_the_structure() group.build_group() group.activate_descriptions() logging.info(\"building group\") def rule_builder(codelet): rule = codelet.arguments[0] if rule.rule_equal(workspace.rule):", "Letter from .replacement import Replacement from .slipnet import slipnet from .workspace_formulas import choose_bond_facet", "object_from_target, concept_mappings, flip_target_object, codelet, ) def important_object_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"relative_importance\", workspace.initial.objects )", "if mydirection == slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond if not", "initial_relevance = relevance_method(workspace.initial, slipnode) target_relevance = relevance_method(workspace.target, slipnode) initial_unhappiness = workspace.initial.intra_string_unhappiness target_unhappiness =", "correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span() ) for incompatible in incompatibles: incompatible_spans = ( incompatible.object_from_initial.letter_span() +", "for i in range(0, len(distribution)): total += distribution[i] if total >= stop: return", ".letter import Letter from .replacement import Replacement from .slipnet import slipnet from .workspace_formulas", "choose_unmodified_object from .workspace_formulas import workspace from 
.workspace_object import WorkspaceObject # some methods common", "if workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule = workspace.rule assert __structure_versus_structure( correspondence, 1.0, incompatible_rule, 1.0 ) for", "slipnet.bond_facet ] initial_description_types = [m.initial_description_type for m in opposites] flip_target_object = False if", "then fight against it initial = correspondence.object_from_initial target = correspondence.object_from_target if initial.leftmost or", "category if category == slipnet.identity: category = slipnet.sameness coderack.propose_bond( source, destination, category, bond_facet,", "def group_scout__whole_string(codelet): string = workspace.initial if random.random() > 0.5: string = workspace.target logging.info(f\"target", "5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.2: distribution", "True destination = source search = True while search: search = False if", "or initial.rightmost and target.leftmost or target.rightmost: # search for the incompatible bond incompatible_bond", "correspondence.get_incompatible_correspondences() # fight against all correspondences if incompatibles: correspondence_spans = ( correspondence.object_from_initial.letter_span() +", "from .bond import Bond from .bond import possible_group_bonds from .coderack import coderack from", "is strong enough - post builder & activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0 if", "not source.spans_string() if source.leftmost: direction = slipnet.right elif source.rightmost: direction = slipnet.left else:", "first_bond.category assert category group_category = category.get_related_node(slipnet.group_category) logging.info(f\"trying from {source} to {category.name}\") bond_facet =", "= [] slippages = workspace.slippages() for node in object_list: node = node.apply_slippages(slippages) if", "important_object_correspondence_scout(codelet): object_from_initial = 
choose_unmodified_object( \"relative_importance\", workspace.initial.objects ) descriptors = object_from_initial.relevant_distinguishing_descriptors() slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors)", "string object_list = [] position = changed.get_descriptor(slipnet.string_position_category) if position: object_list += [position] letter", "continue if source.left_bond.category != category: continue if source.left_bond.direction_category != direction: if source.left_bond.direction_category: continue", "an incompatible bond then fight against it initial = correspondence.object_from_initial target = correspondence.object_from_target", "coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def rule_scout(codelet): assert workspace.number_of_unreplaced_objects()", "concept_mappings: slippiness = mapping.slippability() / 100.0 probability_of_slippage = formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage): return True", "not first_bond: logging.info(\"no first_bond2\") else: logging.info(f\"first_bond2: {first_bond}\") if first_bond and not first_bond.direction_category: direction", "= slipnet.right if direction == slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond", "category if category == slipnet.identity: category = slipnet.sameness logging.info(f\"proposing {category.name} bond \") coderack.propose_bond(", "first_bond.category != category: # check the other side of object if direction ==", "direction = slipnet.left else: direction = slipnet.right if direction == slipnet.left: first_bond =", ") new_bond.build_bond() group.bond_list += [object1.right_bond] for incompatible in incompatible_groups: incompatible.break_the_structure() group.build_group() group.activate_descriptions() logging.info(\"building", "== direction logging.info(f\"possible group: {first_bond}\") category = first_bond.category assert 
category group_category = category.get_related_node(slipnet.group_category)", "chosen_property, codelet ) def description_strength_tester(codelet): description = codelet.arguments[0] description.descriptor.buffer = 100.0 description.update_strength() strength", "m in opposites] flip_target_object = False if ( object_from_initial.spans_string() and object_from_target.spans_string() and slipnet.direction_category", "= object1 category = group.group_category.get_related_node(slipnet.bond_category) facet = group.facet new_bond = Bond( source, destination,", "formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) assert concept_mappings assert __slippability(concept_mappings) # find out", "= source.left_bond.left_object search = True # find rightmost object in group with these", "= formulas.temperature_adjusted_probability(strength / 100.0) assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet, strength) def description_builder(codelet): description =", "slipnet.sameness else: backward_bond = destination_descriptor.get_bond_category(source_descriptor) assert category in [forward_bond, backward_bond] if category ==", "codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() assert bond.source in workspace.objects or bond.destination in workspace.objects for string_bond", "weight1 ) weighted_strength2 = formulas.temperature_adjusted_value( structure2.total_strength * weight2 ) rhs = (weighted_strength1 +", "group_category.get_related_node(slipnet.bond_category) assert category source = __get_scout_source( category, formulas.local_bond_category_relevance, \"group\" ) assert source assert", "letters = [o for o in workspace.initial.objects if isinstance(o, Letter)] letter_of_initial_string = random.choice(letters)", "string = {letter_of_initial_string}\") if letter_of_initial_string.replacement: logging.info( 
f\"Replacement already found for {letter_of_initial_string}, so fizzling\"", "source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def bond_strength_tester(codelet): bond = codelet.arguments[0]", "choose_bond_facet from .workspace_formulas import choose_directed_neighbor from .workspace_formulas import choose_neighbour from .workspace_formulas import choose_unmodified_object", "workspace.number_of_unreplaced_objects() == 0 changed_objects = [o for o in workspace.initial.objects if o.changed] #", "or letter category # if it is the only one of its type", "formulas.temperature_adjusted_probability(rule.total_strength / 100.0) assert random.random() <= probability coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength, rule) def replacement_finder():", "{name}\") return True def __slippability(concept_mappings): for mapping in concept_mappings: slippiness = mapping.slippability() /", "object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def correspondence_strength_tester(codelet): correspondence = codelet.arguments[0] object_from_initial =", "and len(incompatibles)): return True for incompatible in incompatibles: if not __structure_versus_structure( structure, structure_weight,", "description = formulas.choose_relevant_description_by_activation(chosen_object) assert description sliplinks = formulas.similar_property_links(description.descriptor) assert sliplinks values = [", "slipnet.successor, 1: slipnet.predecessor} relation = relations[diff] logging.info(f\"Relation found: {relation.name}\") else: relation = None", "workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) description = formulas.choose_relevant_description_by_activation(chosen_object) assert description sliplinks = formulas.similar_property_links(description.descriptor) assert", "logging.info(\"already exists...activate descriptors & fizzle\") group.activate_descriptions() 
equivalent.add_descriptions(group.descriptions) return # check to see if", "incompatible in incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building bond {bond}\") bond.build_bond() # pylint: disable=too-many-branches # pylint:", "< 2 # if there are no changed objects, propose a rule with", "Fizzle\") return incompatible_bonds = bond.get_incompatible_bonds() logging.info(f\"number of incompatible_bonds: {len(incompatible_bonds)}\") if len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert", "for incompatible in incompatible_bonds: incompatible.break_the_structure() # create new bonds group.bond_list = [] for", "slipnet from .workspace_formulas import choose_bond_facet from .workspace_formulas import choose_directed_neighbor from .workspace_formulas import choose_neighbour", "incompatible_spans ) incompatible_bond = None incompatible_group = None # if there is an", "search = True while search: search = False if not destination.right_bond: continue if", "incompatible_group = target.group if incompatible_group: assert __structure_versus_structure( correspondence, 1.0, incompatible_group, 1.0 ) #", "structures = [ s for s in workspace.structures if isinstance(s, (Group, Bond, Correspondence))", "bond.left_object.leftmost or bond.right_object.rightmost: if bond.direction_category: incompatible_correspondences = bond.get_incompatible_correspondences() if incompatible_correspondences: logging.info(\"trying to break", "isinstance(s, (Group, Bond, Correspondence)) ] assert structures structure = random.choice(structures) __show_which_string_object_is_from(structure) break_objects =", "needs to be flipped opposites = [ m for m in distinguishing_mappings if", "slipnet.sameness and isinstance(source, Letter): group = Group( source.string, slipnet.sameness_group, None, slipnet.letter_category, [source], [],", "True return False # start the actual codelets def breaker(): probability_of_fizzle = (100.0", "assert object_list # use 
conceptual depth to choose a description value_list = []", "from .replacement import Replacement from .slipnet import slipnet from .workspace_formulas import choose_bond_facet from", "source_descriptor.get_bond_category(destination_descriptor) if forward_bond == slipnet.identity: forward_bond = slipnet.sameness backward_bond = slipnet.sameness else: backward_bond", "if the # string description needs to be flipped opposites = [ m", "of object if direction == slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond", "if isinstance(structure, WorkspaceObject): return \"target\" if structure.string == workspace.initial: return \"initial\" return \"other\"", "modified_ascii = ord(workspace.modified_string[position]) diff = initial_ascii - modified_ascii if abs(diff) < 2: relations", "direction == slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond", "= [o for o in workspace.initial.objects if o.changed] # assert len(changed_objects) < 2", "while search: search = False if not source.left_bond: continue if source.left_bond.category != category:", "bond_density = 1.0 cutoff = __get_cut_off(bond_density) * 10.0 assert cutoff >= formulas.actual_temperature if", "= first_bond.direction_category search = True bond_facet = None # find leftmost object in", "True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def important_object_correspondence_scout(codelet): object_from_initial = choose_unmodified_object(", "if ( object_from_initial.spans_string() and object_from_target.spans_string() and slipnet.direction_category in initial_description_types and __all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation", "category group_category = category.get_related_node(slipnet.group_category) logging.info(f\"trying from {source} to {category.name}\") bond_facet = None #", "if incompatible_bond: # bond found - fight 
against it assert __structure_versus_structure( correspondence, 3.0,", "[1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0] elif density >", "mapping.target_descriptor.buffer = 100.0 coderack.new_codelet(\"correspondence-builder\", codelet, strength, correspondence) def correspondence_builder(codelet): correspondence = codelet.arguments[0] object_from_initial", "- 1] object2 = group.object_list[i] if not object1.right_bond: if group.direction_category == slipnet.right: source", "in workspace.structures if isinstance(s, (Group, Bond, Correspondence)) ] assert structures structure = random.choice(structures)", "if bond.direction_category: bond.direction_category.buffer = 100.0 bond.category.buffer = 100.0 logging.info(\"already exists: activate descriptors &", "group_category, direction_category, bond_facet, codelet ) def group_strength_tester(codelet): # update strength value of the", "codelet, ) def important_object_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"relative_importance\", workspace.initial.objects ) descriptors = object_from_initial.relevant_distinguishing_descriptors()", "relevances = initial_relevance + target_relevance unhappinesses = initial_unhappiness + target_unhappiness randomized = random.random()", "break_objects: break_probability = formulas.temperature_adjusted_probability( structure.total_strength / 100.0 ) if formulas.coin_flip(break_probability): return for structure", "in break_objects: structure.break_the_structure() def bottom_up_description_scout(codelet): chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) description", "relevance_method, type_name): initial_relevance = relevance_method(workspace.initial, slipnode) target_relevance = relevance_method(workspace.target, slipnode) initial_unhappiness = workspace.initial.intra_string_unhappiness", "{destination}\") bond_facet = __get_bond_facet(source, 
destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination )", "logging.info(f\"selected letter in initial string = {letter_of_initial_string}\") if letter_of_initial_string.replacement: logging.info( f\"Replacement already found", "= workspace.target logging.info(f\"target string selected: {workspace.target} for {type_name}\") else: logging.info(f\"initial string selected: {workspace.initial}", "still there for o in group.object_list: assert o in workspace.objects # check to", "correspondence.object_from_target.letter_span() ) for incompatible in incompatibles: incompatible_spans = ( incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span() )", "category = source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category = slipnet.sameness coderack.propose_bond(", "bonds, group_category, direction, bond_facet, codelet ) def top_down_group_scout__direction(codelet): direction = codelet.arguments[0] source =", "bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination ) category", "in range(1, len(group.object_list)): object1 = group.object_list[i - 1] object2 = group.object_list[i] if not", "= bond.get_incompatible_correspondences() if incompatible_correspondences: logging.info(\"trying to break incompatible correspondences\") assert __fight(bond, 2.0, incompatible_correspondences,", "from .workspace_formulas import workspace from .workspace_object import WorkspaceObject # some methods common to", "generate a list of distinguishing descriptions for the first object # ie. 
string-position", "= group.object_list[i] if not object1.right_bond: if group.direction_category == slipnet.right: source = object1 destination", "choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) description = formulas.choose_relevant_description_by_activation(chosen_object) assert description sliplinks = formulas.similar_property_links(description.descriptor)", "the actual codelets def breaker(): probability_of_fizzle = (100.0 - formulas.Temperature) / 100.0 assert", "__get_bond_facet(source, destination) logging.info(f\"chosen bond facet: {bond_facet.get_name()}\") logging.info(f\"Source: {source}, destination: {destination}\") bond_descriptors = __get_descriptors(bond_facet,", "True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def correspondence_strength_tester(codelet): correspondence = codelet.arguments[0]", "fight with {incompatible}\") return False logging.info(f\"won fight with {incompatible}\") return True def __fight_incompatibles(", "for incompatible in incompatibles: incompatible_spans = ( incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span() ) assert __structure_versus_structure(", "destination_descriptor, source_descriptor, codelet, ) def top_down_bond_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction,", "in workspace.modified.objects if isinstance(o, Letter) and o.left_index == position ] letter_of_modified_string = more_letters", "/ nearly_total_length if bond_density > 1.0: bond_density = 1.0 cutoff = __get_cut_off(bond_density) *", "randomized > initials: string = workspace.target logging.info(f\"target string selected: {workspace.target} for {type_name}\") else:", "correspondence_strength_tester(codelet): correspondence = codelet.arguments[0] object_from_initial = 
correspondence.object_from_initial object_from_target = correspondence.object_from_target assert object_from_initial in", "incompatibles: incompatible_spans = ( incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span() ) assert __structure_versus_structure( correspondence, correspondence_spans, incompatible,", "rule.activate_rule_descriptions() return rule.update_strength() assert rule.total_strength # fight against other rules if workspace.rule: assert", "posting bond-builder\") coderack.new_codelet(\"bond-builder\", codelet, strength) def bond_builder(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() assert", "= slipnet.left else: direction = slipnet.right if direction == slipnet.left: first_bond = source.left_bond", "from .letter import Letter from .replacement import Replacement from .slipnet import slipnet from", "= choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) descriptions = chosen_object.get_possible_descriptions(description_type) assert descriptions values =", "all incompatible correspondences incompatible_correspondences = [] if bond.left_object.leftmost or bond.right_object.rightmost: if bond.direction_category: incompatible_correspondences", "= ( incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span() ) assert __structure_versus_structure( correspondence, correspondence_spans, incompatible, incompatible_spans )", "not formulas.select_list_position(activations): mydirection = slipnet.left else: mydirection = slipnet.right if mydirection == slipnet.left:", "[] # incompatible bond list if len(group.object_list) > 1: previous = group.object_list[0] for", "objekt.left_bond if left_bond: if left_bond.left_object == previous: continue if left_bond.direction_category == group.direction_category: continue", "initial_unhappiness = 
workspace.initial.intra_string_unhappiness target_unhappiness = workspace.target.intra_string_unhappiness logging.info( f\"initial : relevance = {initial_relevance}, \"", "= True formulas.Temperature = 100.0 def bottom_up_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects )", "100.0 else: description.build() def bottom_up_bond_scout(codelet): source = choose_unmodified_object(\"intra_string_salience\", workspace.objects) __show_which_string_object_is_from(source) destination = choose_neighbour(source)", "bond.get_incompatible_correspondences() if incompatible_correspondences: logging.info(\"trying to break incompatible correspondences\") assert __fight(bond, 2.0, incompatible_correspondences, 3.0)", "def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category = codelet.arguments[0] source = __get_scout_source( category, formulas.local_bond_category_relevance, \"bond\" )", "object_from_target = correspondence.object_from_target assert object_from_initial in workspace.objects assert ( object_from_target in workspace.objects or", "\"bonds\", 1.0, 1.0) incompatible_groups = bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups, bond, \"groups\", 1.0, 1.0) #", "object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target assert object_from_initial in workspace.objects assert ( object_from_target", "it is strong enough - post builder & activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0", "top_down_group_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"direction\" ) logging.info(f\"source chosen", "= source.left_bond.facet direction = source.left_bond.direction_category source = source.left_bond.left_object search = True # 
find", "= relevance_method(workspace.initial, slipnode) target_relevance = relevance_method(workspace.target, slipnode) initial_unhappiness = workspace.initial.intra_string_unhappiness target_unhappiness = workspace.target.intra_string_unhappiness", "codelet, ) def rule_scout(codelet): assert workspace.number_of_unreplaced_objects() == 0 changed_objects = [o for o", "formulas.select_list_position(activations): mydirection = slipnet.left else: mydirection = slipnet.right if mydirection == slipnet.left: first_bond", "value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) equivalent = group.string.equivalent_group(group) if equivalent:", "object_from_target = choose_unmodified_object( \"inter_string_salience\", workspace.target.objects ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get the", "return position = letter_of_initial_string.left_index more_letters = [ o for o in workspace.modified.objects if", "incompatible_bonds: incompatible.break_the_structure() # create new bonds group.bond_list = [] for i in range(1,", "first_bond assert first_bond.direction_category == direction logging.info(f\"possible group: {first_bond}\") category = first_bond.category assert category", "coderack.new_codelet(\"bond-builder\", codelet, strength) def bond_builder(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() assert bond.source in", "else: direction = slipnet.right if direction == slipnet.left: first_bond = source.left_bond else: first_bond", "None, slipnet.letter_category, [source], [], ) probability = group.single_letter_group_probability() assert random.random() >= probability coderack.propose_single_letter_group(source,", "continue if destination.right_bond.direction_category != direction: if destination.right_bond.direction_category: continue if not bond_facet or bond_facet", "objekt in group.object_list[1:]: left_bond = objekt.left_bond if left_bond: if 
left_bond.left_object == previous: continue", ". import temperature from .bond import Bond from .bond import possible_group_bonds from .coderack", "source def __get_bond_facet(source, destination): bond_facet = choose_bond_facet(source, destination) assert bond_facet return bond_facet def", "leftmost.group if leftmost.spans_string(): # the object already spans the string - propose this", "random.random() <= probability # it is strong enough - post builder & activate", "add new ones to the existing corr. existing = correspondence.object_from_initial.correspondence for mapping in", "= __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination ) category =", "category in [forward_bond, backward_bond] if category == forward_bond: coderack.propose_bond( source, destination, category, bond_facet,", "== slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond or", "break_objects += [structure.source.group] # try to break all objects for structure in break_objects:", "0 changed_objects = [o for o in workspace.initial.objects if o.changed] # assert len(changed_objects)", "object_from_target = correspondence.object_from_target want_flip = correspondence.flip_target_object if want_flip: flipper = object_from_target.flipped_version() target_not_flipped =", "len(workspace.target) == 1: bond_density = 1.0 else: number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds) nearly_total_length", "= correspondence.object_from_initial object_from_target = correspondence.object_from_target want_flip = correspondence.flip_target_object if want_flip: flipper = object_from_target.flipped_version()", "objects, bonds, group_category, direction, bond_facet, codelet ) def top_down_group_scout__direction(codelet): direction = codelet.arguments[0] source", "[m.initial_description_type for m in opposites] flip_target_object = False if ( 
object_from_initial.spans_string() and object_from_target.spans_string()", "mydirection = slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations):", "changed.replacement.relation: object_list += [changed.replacement.relation] object_list += [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] # use conceptual depth", "leftmost = None for objekt in string.objects: if objekt.leftmost: leftmost = objekt while", "groups containing these objects incompatible_groups = group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups, group, \"Groups\", 1.0, 1.0)", "object_list[i] coderack.propose_rule( slipnet.letter_category, descriptor, slipnet.letter, relation, codelet ) def rule_strength_tester(codelet): rule = codelet.arguments[0]", "objects += [source.right_bond.right_object] source = source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet", "structure.total_strength / 100.0 ) if formulas.coin_flip(break_probability): return for structure in break_objects: structure.break_the_structure() def", "incompatible_bonds += [right_bond] next_object = objekt # if incompatible bonds exist - fight", "def __show_which_string_object_is_from(structure): if not structure: return \"unstructured\" if isinstance(structure, WorkspaceObject): return \"target\" if", "formulas.select_list_position(value_list) descriptor = object_list[i] # choose the relation (change the letmost object to", "already found for {letter_of_initial_string}, so fizzling\" ) return position = letter_of_initial_string.left_index more_letters =", "flipped opposites = [ m for m in distinguishing_mappings if m.initial_description_type == slipnet.string_position_category", "bond then fight against it initial = correspondence.object_from_initial target = correspondence.object_from_target if initial.leftmost", 
"incompatible rule, fight against it incompatible_rule = None if workspace.rule: if workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule", "codelet.arguments[0] __show_which_string_object_is_from(group) equivalent = group.string.equivalent_group(group) if equivalent: logging.info(\"already exists...activate descriptors & fizzle\") group.activate_descriptions()", "of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength() strength = group.total_strength probability =", "search for the incompatible bond incompatible_bond = correspondence.get_incompatible_bond() if incompatible_bond: # bond found", "at random structures = [ s for s in workspace.structures if isinstance(s, (Group,", "* sliplink.destination.activation for sliplink in sliplinks ] i = formulas.select_list_position(values) chosen = sliplinks[i]", "string = workspace.target logging.info(f\"target string selected: {workspace.target} for {type_name}\") else: logging.info(f\"initial string selected:", "chosen_property = chosen.destination coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def top_down_description_scout(codelet): description_type =", "formulas.local_bond_category_relevance, \"group\" ) assert source assert not source.spans_string() if source.leftmost: direction = slipnet.right", "distinguishing_mappings = [m for m in concept_mappings if m.distinguishing()] assert distinguishing_mappings # if", "{len(incompatible_bonds)}\") if len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds, bond, \"bonds\", 1.0, 1.0) incompatible_groups = bond.source.get_common_groups(bond.destination)", "assert initial_in_objects or ( not target_in_objects and (not (want_flip and target_not_flipped)) ) if", "True destination = source while search: search = False if not destination.right_bond: continue", "if not 
formulas.select_list_position(activations): direction = slipnet.left else: direction = slipnet.right if direction ==", "True formulas.Temperature = 100.0 def bottom_up_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects ) object_from_target", "destination) logging.info(f\"chosen bond facet: {bond_facet.get_name()}\") logging.info(f\"Source: {source}, destination: {destination}\") bond_descriptors = __get_descriptors(bond_facet, source,", "= [] position = changed.get_descriptor(slipnet.string_position_category) if position: object_list += [position] letter = changed.get_descriptor(slipnet.letter_category)", "= correspondence.object_from_target if initial.leftmost or initial.rightmost and target.leftmost or target.rightmost: # search for", "__get_descriptors(bond_facet, source, destination) source_descriptor, destination_descriptor = bond_descriptors logging.info(f\"source descriptor: {source_descriptor.name.upper()}\") logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\")", "exists: activate descriptors & Fizzle\") return incompatible_bonds = bond.get_incompatible_bonds() logging.info(f\"number of incompatible_bonds: {len(incompatible_bonds)}\")", "string description needs to be flipped opposites = [ m for m in", "relation != slipnet.sameness: letter_of_initial_string.changed = True workspace.changed_object = letter_of_initial_string logging.info(\"building replacement\") def top_down_bond_scout__category(codelet):", "group.single_letter_group_probability() assert random.random() >= probability coderack.propose_single_letter_group(source, codelet) return direction = first_bond.direction_category search =", "else: logging.info(f\"initial string selected: {workspace.initial}\") # find leftmost object & the highest group", "def rule_builder(codelet): rule = codelet.arguments[0] if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return 
rule.update_strength() assert rule.total_strength #", "with these bonds while search: search = False if not source.left_bond: continue if", "{rhs}: {weighted_strength1 > rhs}\") return weighted_strength1 > rhs def __fight(structure, structure_weight, incompatibles, incompatible_weight):", "if formulas.coin_flip(probability_of_slippage): return True return False # start the actual codelets def breaker():", "chosen_object, chosen_property.category(), chosen_property, codelet ) def top_down_description_scout(codelet): description_type = codelet.arguments[0] chosen_object = choose_unmodified_object(\"total_salience\",", "destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def bond_strength_tester(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond)", "belongs leftmost = None for objekt in string.objects: if objekt.leftmost: leftmost = objekt", "# ie. string-position (left-,right-most,middle or whole) or letter category # if it is", "__fight(structure, structure_weight, incompatibles, incompatible_weight): if not (incompatibles and len(incompatibles)): return True for incompatible", "assert bond_facet return bond_facet def __get_descriptors(bond_facet, source, destination): source_descriptor = source.get_descriptor(bond_facet) destination_descriptor =", "assert structures structure = random.choice(structures) __show_which_string_object_is_from(structure) break_objects = [structure] if isinstance(structure, Bond): if", "the group group = codelet.arguments[0] __show_which_string_object_is_from(group) equivalent = group.string.equivalent_group(group) if equivalent: logging.info(\"already exists...activate", "source_descriptor = source.get_descriptor(bond_facet) destination_descriptor = destination.get_descriptor(bond_facet) assert source_descriptor assert destination_descriptor return source_descriptor, destination_descriptor", "won against incompatible bond incompatible_group = target.group if 
incompatible_group: assert __structure_versus_structure( correspondence, 1.0,", "codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() strength = bond.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) logging.info(f\"bond strength", "= codelet.arguments[0] category = group_category.get_related_node(slipnet.bond_category) assert category source = __get_scout_source( category, formulas.local_bond_category_relevance, \"group\"", "direction = first_bond.direction_category search = True bond_facet = None # find leftmost object", "__get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination ) category = source_descriptor.get_bond_category(destination_descriptor)", "bond_facet = choose_bond_facet(source, destination) assert bond_facet return bond_facet def __get_descriptors(bond_facet, source, destination): source_descriptor", "= choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) description = formulas.choose_relevant_description_by_activation(chosen_object) assert description sliplinks =", "slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor} relation = relations[diff] logging.info(f\"Relation found: {relation.name}\") else: relation", "= slipnet.sameness logging.info(f\"proposing {category.name} bond \") coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor,", "= category.get_related_node(slipnet.group_category) logging.info(f\"trying from {source} to {category.name}\") bond_facet = None # find leftmost", "# choose a random bond from list chosen_bond = random.choice(bonds) category = chosen_bond.category", "= correspondence.object_from_initial target = correspondence.object_from_target if initial.leftmost or initial.rightmost and target.leftmost or target.rightmost:", "= 
formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) flip_target_object = True coderack.propose_correspondence( object_from_initial, object_from_target,", "[] if changed.replacement.relation: object_list += [changed.replacement.relation] object_list += [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] # use", "\"bond\" ) destination = choose_neighbour(source) logging.info(f\"source: {source}, destination: {destination}\") assert destination bond_facet =", "assert workspace.number_of_unreplaced_objects() == 0 changed_objects = [o for o in workspace.initial.objects if o.changed]", "bond.direction_category.buffer = 100.0 bond.category.buffer = 100.0 logging.info(\"already exists: activate descriptors & Fizzle\") return", "= None logging.info(\"no relation found\") letter_of_initial_string.replacement = Replacement( letter_of_initial_string, letter_of_modified_string, relation ) if", "strength = group.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability #", ".coderack import coderack from .correspondence import Correspondence from .group import Group from .letter", "destination_descriptor, codelet, ) def bond_strength_tester(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() strength = bond.total_strength", "1.0] elif density > 0.2: distribution = [1.0, 1.0, 2.0, 5.0, 150.0, 5.0,", ">= probability coderack.propose_single_letter_group(source, codelet) return direction = first_bond.direction_category search = True bond_facet =", "not workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength() strength = correspondence.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert", "<= probability # activate some concepts for mapping 
in correspondence.concept_mappings: mapping.initial_description_type.buffer = 100.0", "2 # if there are no changed objects, propose a rule with no", "100.0 description.update_strength() strength = description.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\",", "bond_facet == source.left_bond.facet: bond_facet = source.left_bond.facet direction = source.left_bond.direction_category source = source.left_bond.left_object search", "direction, bond_facet, codelet ) # noinspection PyStringFormat def group_scout__whole_string(codelet): string = workspace.initial if", "bonds group.bond_list = [] for i in range(1, len(group.object_list)): object1 = group.object_list[i -", "description sliplinks = formulas.similar_property_links(description.descriptor) assert sliplinks values = [ sliplink.degree_of_association() * sliplink.destination.activation for", "__fight_incompatibles(incompatible_groups, group, \"Groups\", 1.0, 1.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() # create new", "if mydirection == slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond if not", "letter_of_modified_string = more_letters and more_letters[0] or None assert letter_of_modified_string position -= 1 initial_ascii", "side of object if direction == slipnet.right: first_bond = source.left_bond else: first_bond =", "formulas.select_list_position(values) chosen_property = descriptions[i] coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def description_strength_tester(codelet): description", "mydirection == slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond:", "+= [right_bond] next_object = objekt # if incompatible bonds exist - fight group.update_strength()", "chosen_object, chosen_property.category(), chosen_property, 
codelet ) def description_strength_tester(codelet): description = codelet.arguments[0] description.descriptor.buffer = 100.0", "formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage): return True return False # start the actual codelets def", "2.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.4: distribution = [1.0,", "incompatible bond incompatible_bond = correspondence.get_incompatible_bond() if incompatible_bond: # bond found - fight against", "top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category = codelet.arguments[0] source = __get_scout_source( category, formulas.local_bond_category_relevance, \"bond\" ) destination", "if mapping.initial_descriptor == slipnode: initial_descriptor = mapping.target_descriptor target_candidates = [] for objekt in", "source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet ) def top_down_group_scout__direction(codelet): direction =", "__structure_versus_structure( correspondence, 3.0, incompatible_bond, 2.0 ) # won against incompatible bond incompatible_group =", "source = choose_unmodified_object(\"intra_string_salience\", string.objects) return source def __get_bond_facet(source, destination): bond_facet = choose_bond_facet(source, destination)", "if all objects are still there for o in group.object_list: assert o in", "= workspace.target.intra_string_unhappiness logging.info( f\"initial : relevance = {initial_relevance}, \" f\"unhappiness = {int(initial_unhappiness)}\" )", "slipnet.identity: category = slipnet.sameness coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, )", "mydirection = slipnet.right elif source.rightmost: mydirection = slipnet.left else: activations = [slipnet.left.activation] activations", "descriptions] i = formulas.select_list_position(values) chosen_property = descriptions[i] 
coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet )", "the workspace # object_list = the union of this and the distingushing descriptors", "= first_bond.category assert category group_category = category.get_related_node(slipnet.group_category) logging.info(f\"trying from {source} to {category.name}\") bond_facet", "assert random.random() <= probability coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength, rule) def replacement_finder(): # choose random", "[n.activation for n in descriptions] i = formulas.select_list_position(values) chosen_property = descriptions[i] coderack.propose_description( chosen_object,", "to another object in the workspace # object_list = the union of this", "with {incompatible}\") return False logging.info(f\"won fight with {incompatible}\") return True def __fight_incompatibles( incompatibles,", "incompatible_correspondences: logging.info(\"trying to break incompatible correspondences\") assert __fight(bond, 2.0, incompatible_correspondences, 3.0) for incompatible", "= slipnet.sameness coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def bond_strength_tester(codelet):", "density > 0.4: distribution = [1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0,", "structure.break_the_structure() def bottom_up_description_scout(codelet): chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) description = formulas.choose_relevant_description_by_activation(chosen_object)", "!= 100.0 ): object_from_target = object_from_target.flipped_version() concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(),", "objects += [leftmost] assert leftmost.rightmost # choose a random bond from list chosen_bond", "> 0.4: distribution = 
[1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0,", "len(workspace.initial) == 1 and len(workspace.target) == 1: bond_density = 1.0 else: number_of_bonds =", "__get_scout_source( category, formulas.local_bond_category_relevance, \"group\" ) assert source assert not source.spans_string() if source.leftmost: direction", "destination.right_bond: continue if destination.right_bond.category != category: continue if destination.right_bond.direction_category != direction: if destination.right_bond.direction_category:", "category: if category == slipnet.sameness and isinstance(source, Letter): group = Group( source.string, slipnet.sameness_group,", "to see if all objects are still there for o in group.object_list: assert", "group.build_group() group.activate_descriptions() logging.info(\"building group\") def rule_builder(codelet): rule = codelet.arguments[0] if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return", "= bond.get_incompatible_bonds() logging.info(f\"number of incompatible_bonds: {len(incompatible_bonds)}\") if len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds, bond, \"bonds\",", "import slipnet from .workspace_formulas import choose_bond_facet from .workspace_formulas import choose_directed_neighbor from .workspace_formulas import", "or correspondence.flip_target_object and not workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength() strength = correspondence.total_strength probability = formulas.temperature_adjusted_probability(strength", "= Bond( source, destination, category, facet, source.get_descriptor(facet), destination.get_descriptor(facet), ) new_bond.build_bond() group.bond_list += [object1.right_bond]", "for mapping in correspondence.concept_mappings: mapping.initial_description_type.buffer = 100.0 mapping.initial_descriptor.buffer = 100.0 mapping.target_description_type.buffer = 100.0", "len(workspace.target.bonds) 
nearly_total_length = len(workspace.initial) + len(workspace.target) - 2 bond_density = number_of_bonds / nearly_total_length", "slipnet.left else: direction = slipnet.right if direction == slipnet.left: first_bond = source.left_bond else:", "source.left_bond else: first_bond = source.right_bond if not first_bond or first_bond.category != category: #", "group_category = category.get_related_node(slipnet.group_category) coderack.propose_group( objects, bonds, group_category, direction_category, bond_facet, codelet ) def group_strength_tester(codelet):", "descriptions values = [n.activation for n in descriptions] i = formulas.select_list_position(values) chosen_property =", "logging.info(\"top_down_bond_scout__category\") category = codelet.arguments[0] source = __get_scout_source( category, formulas.local_bond_category_relevance, \"bond\" ) destination =", "target = correspondence.object_from_target if initial.leftmost or initial.rightmost and target.leftmost or target.rightmost: # search", "return bonds = [] objects = [leftmost] while leftmost.right_bond: bonds += [leftmost.right_bond] leftmost", "search = True assert destination != source objects = [source] bonds = []", "object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def important_object_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"relative_importance\", workspace.initial.objects", "correspondence.object_from_target want_flip = correspondence.flip_target_object if want_flip: flipper = object_from_target.flipped_version() target_not_flipped = not workspace.target.equivalent_group(flipper)", "object in group with these bonds while search: search = False if not", "from . import formulas from . 
import temperature from .bond import Bond from", "): logging.info(f\"lost fight with {incompatible}\") return False logging.info(f\"won fight with {incompatible}\") return True", "group = codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength() strength = group.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0)", "1.0, 1.0, 1.0] elif density > 0.2: distribution = [1.0, 1.0, 2.0, 5.0,", "probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability # activate some concepts", "if there is an incompatible bond then fight against it initial = correspondence.object_from_initial", "= formulas.select_list_position(values) chosen = sliplinks[i] chosen_property = chosen.destination coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet", "coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet ) # noinspection PyStringFormat def group_scout__whole_string(codelet):", "assert descriptions values = [n.activation for n in descriptions] i = formulas.select_list_position(values) chosen_property", "descriptor: {source_descriptor.name.upper()}\") logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\") category = source_descriptor.get_bond_category(destination_descriptor) assert category if category ==", "target_candidates ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get the posible concept mappings concept_mappings", "== initial_descriptor: target_candidates += [objekt] assert target_candidates object_from_target = choose_unmodified_object( \"inter_string_salience\", target_candidates )", "the {name}\") return True logging.info(f\"failed to break {name}: Fizzle\") return False logging.info(f\"no incompatible", "+= [left_bond] previous = objekt next_object = group.object_list[-1] for objekt in reversed(group.object_list[:-1]): 
right_bond", "= source.right_bond if not first_bond: logging.info(\"no first_bond\") else: logging.info(f\"first_bond: {first_bond}\") if first_bond and", "coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) else: coderack.propose_bond( destination, source,", "destination.right_bond.category != category: continue if destination.right_bond.direction_category != direction: if destination.right_bond.direction_category: continue if not", "logging.info(f\"proposing group from {source} to {destination}\") objects = [source] bonds = [] while", "= choose_unmodified_object( \"inter_string_salience\", workspace.target.objects ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get the posible", "in concept_mappings: slippiness = mapping.slippability() / 100.0 probability_of_slippage = formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage): return", "= True else: temperature.clamp_time = coderack.codelets_run + 100 temperature.clamped = True formulas.Temperature =", "True else: temperature.clamp_time = coderack.codelets_run + 100 temperature.clamped = True formulas.Temperature = 100.0", "False if ( object_from_initial.spans_string() and object_from_target.spans_string() and slipnet.direction_category in initial_description_types and __all_opposite_mappings(formulas.opposite_mappings) and", "= chosen_bond.category direction_category = chosen_bond.direction_category bond_facet = chosen_bond.facet bonds = possible_group_bonds(category, direction_category, bond_facet,", "chosen_bond = random.choice(bonds) category = chosen_bond.category direction_category = chosen_bond.direction_category bond_facet = chosen_bond.facet bonds", "values = [ sliplink.degree_of_association() * sliplink.destination.activation for sliplink in sliplinks ] i =", "= number_of_bonds / nearly_total_length if bond_density > 1.0: bond_density = 1.0 cutoff =", "1.0, 
1.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.6: distribution = [2.0,", "initial_relevance + target_relevance unhappinesses = initial_unhappiness + target_unhappiness randomized = random.random() * (relevances", "assert sliplinks values = [ sliplink.degree_of_association() * sliplink.destination.activation for sliplink in sliplinks ]", "+= [leftmost] assert leftmost.rightmost # choose a random bond from list chosen_bond =", "description needs to be flipped opposites = [ m for m in distinguishing_mappings", "if left_bond.direction_category == group.direction_category: continue incompatible_bonds += [left_bond] previous = objekt next_object =", "chosen_property.category(), chosen_property, codelet ) def top_down_description_scout(codelet): description_type = codelet.arguments[0] chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects)", "or ( not target_in_objects and (not (want_flip and target_not_flipped)) ) if correspondence.reflexive(): #", "= __get_scout_source( category, formulas.local_bond_category_relevance, \"group\" ) assert source assert not source.spans_string() if source.leftmost:", "# some methods common to the codelets def __show_which_string_object_is_from(structure): if not structure: return", "assert not formulas.coin_flip(probability_of_fizzle) # choose a structure at random structures = [ s", "selected: {workspace.target} for {type_name}\") else: logging.info(f\"initial string selected: {workspace.initial} for {type_name}\") source =", "changed_objects = [o for o in workspace.initial.objects if o.changed] # assert len(changed_objects) <", "choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects ) object_from_target = choose_unmodified_object( \"inter_string_salience\", workspace.target.objects ) assert object_from_initial.spans_string() ==", "the other side of object if direction == slipnet.right: first_bond = source.left_bond else:", "= [n.activation for n in descriptions] i = 
formulas.select_list_position(values) chosen_property = descriptions[i] coderack.propose_description(", "if not source.left_bond: continue if source.left_bond.category != category: continue if source.left_bond.direction_category != direction:", "correspondence.object_from_target if initial.leftmost or initial.rightmost and target.leftmost or target.rightmost: # search for the", "activate descriptors & Fizzle\") return incompatible_bonds = bond.get_incompatible_bonds() logging.info(f\"number of incompatible_bonds: {len(incompatible_bonds)}\") if", "if source.leftmost: direction = slipnet.right elif source.rightmost: direction = slipnet.left else: activations =", "1.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() # create new bonds group.bond_list = []", "continue if source.left_bond.direction_category != direction: if source.left_bond.direction_category: continue if not bond_facet or bond_facet", "destination) source_descriptor, destination_descriptor = bond_descriptors logging.info(f\"source descriptor: {source_descriptor.name.upper()}\") logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\") category =", "probability_of_fizzle = (100.0 - formulas.Temperature) / 100.0 assert not formulas.coin_flip(probability_of_fizzle) # choose a", "direction_category = chosen_bond.direction_category bond_facet = chosen_bond.facet bonds = possible_group_bonds(category, direction_category, bond_facet, bonds) assert", "incompatible_weight ): if len(incompatibles): if __fight(structure, structure_weight, incompatibles, incompatible_weight): logging.info(f\"broke the {name}\") return", "assert rule.total_strength # fight against other rules if workspace.rule: assert __structure_versus_structure(rule, 1.0, workspace.rule,", "= {source}\") assert not source.spans_string() if source.leftmost: mydirection = slipnet.right elif source.rightmost: mydirection", "[right_bond] next_object = objekt # if incompatible bonds exist - fight 
group.update_strength() assert", "= [ sliplink.degree_of_association() * sliplink.destination.activation for sliplink in sliplinks ] i = formulas.select_list_position(values)", "this object group = leftmost coderack.propose_group( group.object_list, group.bond_list, group.group_category, group.direction_category, group.facet, codelet, )", "codelet, rule.total_strength, rule) def replacement_finder(): # choose random letter in initial string letters", "isinstance(o, Letter) and o.left_index == position ] letter_of_modified_string = more_letters and more_letters[0] or", "group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups, group, \"Groups\", 1.0, 1.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() #", "workspace.rule.build_translated_rule(): workspace.found_answer = True else: temperature.clamp_time = coderack.codelets_run + 100 temperature.clamped = True", "slipnet.sameness coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def bond_strength_tester(codelet): bond", "group, \"bonds\", 1.0, 1.0) # fight incompatible groups # fight all groups containing", "= slipnet.right elif source.rightmost: mydirection = slipnet.left else: activations = [slipnet.left.activation] activations +=", "source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet ) # noinspection PyStringFormat def", "letter in initial string letters = [o for o in workspace.initial.objects if isinstance(o,", "new_bond.build_bond() group.bond_list += [object1.right_bond] for incompatible in incompatible_groups: incompatible.break_the_structure() group.build_group() group.activate_descriptions() logging.info(\"building group\")", "( object_from_target in workspace.objects or correspondence.flip_target_object and not workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength() 
strength =", "correspondence.object_from_initial object_from_target = correspondence.object_from_target want_flip = correspondence.flip_target_object if want_flip: flipper = object_from_target.flipped_version() target_not_flipped", "{name}\") return True logging.info(f\"failed to break {name}: Fizzle\") return False logging.info(f\"no incompatible {name}\")", "mydirection = slipnet.right if mydirection == slipnet.left: first_bond = source.left_bond else: first_bond =", "bonds group_category = category.get_related_node(slipnet.group_category) coderack.propose_group( objects, bonds, group_category, direction_category, bond_facet, codelet ) def", "= chosen_bond.direction_category bond_facet = chosen_bond.facet bonds = possible_group_bonds(category, direction_category, bond_facet, bonds) assert bonds", "= mapping.target_descriptor target_candidates = [] for objekt in workspace.target.objects: for description in objekt.relevant_descriptions():", "reversed(group.object_list[:-1]): right_bond = objekt.right_bond if right_bond: if right_bond.right_object == next_object: continue if right_bond.direction_category", "codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target want_flip = correspondence.flip_target_object if want_flip: flipper", "workspace.target.equivalent_group(flipper) else: target_not_flipped = False initial_in_objects = object_from_initial in workspace.objects target_in_objects = object_from_target", "* weight2 ) rhs = (weighted_strength1 + weighted_strength2) * random.random() logging.info(f\"{weighted_strength1} > {rhs}:", "# choose a structure at random structures = [ s for s in", "these bonds search = True while search: search = False if not source.left_bond:", "(weighted_strength1 + weighted_strength2) * random.random() logging.info(f\"{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}\") return weighted_strength1", "group_category = codelet.arguments[0] category = 
group_category.get_related_node(slipnet.bond_category) assert category source = __get_scout_source( category, formulas.local_bond_category_relevance,", "the distingushing descriptors if changed.correspondence: target_object = changed.correspondence.object_from_target new_list = [] slippages =", "bonds) assert bonds group_category = category.get_related_node(slipnet.group_category) coderack.propose_group( objects, bonds, group_category, direction_category, bond_facet, codelet", "right_bond.direction_category == group.direction_category: continue incompatible_bonds += [right_bond] next_object = objekt # if incompatible", "def description_strength_tester(codelet): description = codelet.arguments[0] description.descriptor.buffer = 100.0 description.update_strength() strength = description.total_strength probability", "__fight(bond, 2.0, incompatible_correspondences, 3.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() for incompatible in incompatible_groups:", "or \"d\" object_list = [] if changed.replacement.relation: object_list += [changed.replacement.relation] object_list += [", "bond_facet, codelet ) # noinspection PyStringFormat def group_scout__whole_string(codelet): string = workspace.initial if random.random()", "if len(workspace.initial) == 1 and len(workspace.target) == 1: bond_density = 1.0 else: number_of_bonds", "logging.info( f\"Replacement already found for {letter_of_initial_string}, so fizzling\" ) return position = letter_of_initial_string.left_index", "use conceptual depth to choose a description value_list = [] for node in", "these bonds search = True destination = source while search: search = False", ") def rule_scout(codelet): assert workspace.number_of_unreplaced_objects() == 0 changed_objects = [o for o in", "coderack.propose_rule(None, None, None, None, codelet) changed = changed_objects[-1] # generate a list of", "initial string letters = [o for o in workspace.initial.objects if isinstance(o, Letter)] 
letter_of_initial_string", "slipnet.sameness_group, None, slipnet.letter_category, [source], [], ) probability = group.single_letter_group_probability() assert random.random() >= probability", ". import formulas from . import temperature from .bond import Bond from .bond", "bond from list chosen_bond = random.choice(bonds) category = chosen_bond.category direction_category = chosen_bond.direction_category bond_facet", "to choose a description value_list = [] for node in object_list: depth =", "source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"bond\" ) destination = choose_directed_neighbor(source, direction) assert destination", "and o.left_index == position ] letter_of_modified_string = more_letters and more_letters[0] or None assert", "category = slipnet.sameness coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def", "assert letter_of_modified_string position -= 1 initial_ascii = ord(workspace.initial_string[position]) modified_ascii = ord(workspace.modified_string[position]) diff =", "search = False if not source.left_bond: continue if source.left_bond.category != category: continue if", "for node in object_list: node = node.apply_slippages(slippages) if target_object.described(node): if target_object.distinguishing_descriptor(node): new_list +=", "in workspace.objects assert initial_in_objects or ( not target_in_objects and (not (want_flip and target_not_flipped))", "{first_bond}\") if first_bond and not first_bond.direction_category: direction = None if not first_bond or", "= [] if changed.replacement.relation: object_list += [changed.replacement.relation] object_list += [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] #", "source.get_descriptor(bond_facet) destination_descriptor = destination.get_descriptor(bond_facet) assert source_descriptor assert destination_descriptor return source_descriptor, 
destination_descriptor def __all_opposite_mappings(mappings):", "== source.left_bond.facet: bond_facet = source.left_bond.facet direction = source.left_bond.direction_category source = source.left_bond.left_object search =", "string.objects) return source def __get_bond_facet(source, destination): bond_facet = choose_bond_facet(source, destination) assert bond_facet return", "assert leftmost.rightmost # choose a random bond from list chosen_bond = random.choice(bonds) category", "chosen = sliplinks[i] chosen_property = chosen.destination coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def", "sliplinks values = [ sliplink.degree_of_association() * sliplink.destination.activation for sliplink in sliplinks ] i", "workspace.objects) __show_which_string_object_is_from(source) destination = choose_neighbour(source) assert destination logging.info(f\"destination: {destination}\") bond_facet = __get_bond_facet(source, destination)", "source.left_bond.facet direction = source.left_bond.direction_category source = source.left_bond.left_object search = True destination = source", "all objects for structure in break_objects: break_probability = formulas.temperature_adjusted_probability( structure.total_strength / 100.0 )", "first_bond: logging.info(\"no first_bond2\") else: logging.info(f\"first_bond2: {first_bond}\") if first_bond and not first_bond.direction_category: direction =", "# should this be += ?? 
assert object_list # use conceptual depth to", "not (incompatibles and len(incompatibles)): return True for incompatible in incompatibles: if not __structure_versus_structure(", "i = formulas.select_list_position(values) chosen_property = descriptions[i] coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def", "letter_of_modified_string, relation ) if relation != slipnet.sameness: letter_of_initial_string.changed = True workspace.changed_object = letter_of_initial_string", "def breaker(): probability_of_fizzle = (100.0 - formulas.Temperature) / 100.0 assert not formulas.coin_flip(probability_of_fizzle) #", "1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0] else: distribution = [1.0,", "assert description.object in workspace.objects if description.object.described(description.descriptor): description.description_type.buffer = 100.0 description.descriptor.buffer = 100.0 else:", "incompatible.break_the_structure() logging.info(f\"building bond {bond}\") bond.build_bond() # pylint: disable=too-many-branches # pylint: disable=too-many-statements def top_down_group_scout__category(codelet):", "+= ?? 
assert object_list # use conceptual depth to choose a description value_list", "= formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) relation = object_list[i] coderack.propose_rule( slipnet.letter_category,", "= group.object_list[-1] for objekt in reversed(group.object_list[:-1]): right_bond = objekt.right_bond if right_bond: if right_bond.right_object", "= correspondence.object_from_initial.correspondence for mapping in correspondence.concept_mappings: if mapping.label: mapping.label.buffer = 100.0 if not", "{workspace.target}\") else: logging.info(f\"initial string selected: {workspace.initial}\") # find leftmost object & the highest", "for incompatible in incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building bond {bond}\") bond.build_bond() # pylint: disable=too-many-branches #", "group\") def rule_builder(codelet): rule = codelet.arguments[0] if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return rule.update_strength() assert rule.total_strength", "group, \"Groups\", 1.0, 1.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() # create new bonds", "source logging.info(f\"proposing group from {source} to {destination}\") objects = [source] bonds = []", ".correspondence import Correspondence from .group import Group from .letter import Letter from .replacement", "category: continue if source.left_bond.direction_category != direction: if source.left_bond.direction_category: continue if not bond_facet or", "workspace.objects assert initial_in_objects or ( not target_in_objects and (not (want_flip and target_not_flipped)) )", "to be flipped opposites = [ m for m in distinguishing_mappings if m.initial_description_type", "import coderack from .correspondence import Correspondence from .group import Group from .letter import", "correspondence.concept_mappings: mapping.initial_description_type.buffer = 100.0 
mapping.initial_descriptor.buffer = 100.0 mapping.target_description_type.buffer = 100.0 mapping.target_descriptor.buffer = 100.0", "slippiness = mapping.slippability() / 100.0 probability_of_slippage = formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage): return True return", "a relation value_list = [] for node in object_list: depth = node.conceptual_depth value", "logging.info(\"no first_bond\") else: logging.info(f\"first_bond: {first_bond}\") if first_bond and not first_bond.direction_category: direction = None", ") forward_bond = source_descriptor.get_bond_category(destination_descriptor) if forward_bond == slipnet.identity: forward_bond = slipnet.sameness backward_bond =", "rule.total_strength # fight against other rules if workspace.rule: assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0)", "unhappinesses) initials = initial_relevance + initial_unhappiness if randomized > initials: string = workspace.target", "changed.get_descriptor(slipnet.string_position_category) if position: object_list += [position] letter = changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter = [ o", "100.0) logging.info(f\"bond strength = {strength} for {bond}\") assert formulas.coin_flip(probability) bond.facet.buffer = 100.0 bond.source_descriptor.buffer", "letter category # if it is the only one of its type in", "not changed_objects: return coderack.propose_rule(None, None, None, None, codelet) changed = changed_objects[-1] # generate", "codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"bond\" ) destination = choose_directed_neighbor(source, direction) assert", "selected: {workspace.initial} for {type_name}\") source = choose_unmodified_object(\"intra_string_salience\", string.objects) return source def __get_bond_facet(source, destination):", "elif source.rightmost: mydirection = slipnet.left else: activations = 
[slipnet.left.activation] activations += [slipnet.right.activation] if", "not first_bond or first_bond.direction_category != direction: if mydirection == slipnet.right: first_bond = source.left_bond", "= group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups, group, \"Groups\", 1.0, 1.0) for incompatible in incompatible_bonds: incompatible.break_the_structure()", "choose random letter in initial string letters = [o for o in workspace.initial.objects", "for incompatible in incompatible_bonds: incompatible.break_the_structure() for incompatible in incompatible_groups: incompatible.break_the_structure() for incompatible in", "first_bond or first_bond.category != category: # check the other side of object if", "structure1.update_strength() structure2.update_strength() weighted_strength1 = formulas.temperature_adjusted_value( structure1.total_strength * weight1 ) weighted_strength2 = formulas.temperature_adjusted_value( structure2.total_strength", "direction = source.left_bond.direction_category source = source.left_bond.left_object search = True # find rightmost object", "node in object_list: node = node.apply_slippages(slippages) if target_object.described(node): if target_object.distinguishing_descriptor(node): new_list += [node]", "incompatible_group: assert __structure_versus_structure( correspondence, 1.0, incompatible_group, 1.0 ) # if there is an", "def __slippability(concept_mappings): for mapping in concept_mappings: slippiness = mapping.slippability() / 100.0 probability_of_slippage =", "= Group( source.string, slipnet.sameness_group, None, slipnet.letter_category, [source], [], ) probability = group.single_letter_group_probability() assert", "formulas.temperature_adjusted_value( structure2.total_strength * weight2 ) rhs = (weighted_strength1 + weighted_strength2) * random.random() logging.info(f\"{weighted_strength1}", "= formulas.select_list_position(value_list) descriptor = object_list[i] # choose the relation (change the letmost 
object", ": relevance = {initial_relevance}, \" f\"unhappiness = {int(initial_unhappiness)}\" ) logging.info( f\"target : relevance", "right_bond = objekt.right_bond if right_bond: if right_bond.right_object == next_object: continue if right_bond.direction_category ==", "object_list: depth = node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list)", "if objekt.leftmost: leftmost = objekt while leftmost.group and leftmost.group.bond_category == slipnet.sameness: leftmost =", "bond_facet, destination_descriptor, source_descriptor, codelet, ) def top_down_bond_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source(", "if incompatible bonds exist - fight group.update_strength() assert __fight_incompatibles(incompatible_bonds, group, \"bonds\", 1.0, 1.0)", "if first_bond and not first_bond.direction_category: direction = None assert first_bond assert first_bond.direction_category ==", "# if the correspondence exists, activate concept mappings # and add new ones", "continue if not bond_facet or bond_facet == source.left_bond.facet: bond_facet = source.left_bond.facet direction =", "= chosen.destination coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def top_down_description_scout(codelet): description_type = codelet.arguments[0]", "* weight1 ) weighted_strength2 = formulas.temperature_adjusted_value( structure2.total_strength * weight2 ) rhs = (weighted_strength1", "workspace.rule if len(workspace.initial) == 1 and len(workspace.target) == 1: bond_density = 1.0 else:", "Group( source.string, slipnet.sameness_group, None, slipnet.letter_category, [source], [], ) probability = group.single_letter_group_probability() assert random.random()", "destination, source, category, bond_facet, destination_descriptor, source_descriptor, codelet, ) def top_down_bond_scout__direction(codelet): direction = 
codelet.arguments[0]", ".bond import possible_group_bonds from .coderack import coderack from .correspondence import Correspondence from .group", "direction: if destination.right_bond.direction_category: continue if not bond_facet or bond_facet == destination.right_bond.facet: bond_facet =", "node.apply_slippages(slippages) if target_object.described(node): if target_object.distinguishing_descriptor(node): new_list += [node] object_list = new_list # should", "rhs}\") return weighted_strength1 > rhs def __fight(structure, structure_weight, incompatibles, incompatible_weight): if not (incompatibles", "for string_bond in bond.string.bonds: if bond.same_neighbours(string_bond) and bond.same_categories(string_bond): if bond.direction_category: bond.direction_category.buffer = 100.0", "post builder & activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0 if group.direction_category: group.direction_category.buffer = 100.0", "== object_from_target.spans_string() # get the posible concept mappings concept_mappings = formulas.get_mappings( object_from_initial, object_from_target,", "incompatible.object_from_target.letter_span() ) assert __structure_versus_structure( correspondence, correspondence_spans, incompatible, incompatible_spans ) incompatible_bond = None incompatible_group", ") def top_down_description_scout(codelet): description_type = codelet.arguments[0] chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object)", "destination logging.info(f\"destination: {destination}\") bond_facet = __get_bond_facet(source, destination) logging.info(f\"chosen bond facet: {bond_facet.get_name()}\") logging.info(f\"Source: {source},", "previous: continue if left_bond.direction_category == group.direction_category: continue incompatible_bonds += [left_bond] previous = objekt", "distribution[i] if total >= stop: return i + 1 return len(distribution) def 
rule_translator():", "bond_facet, source_descriptor, destination_descriptor, codelet, ) def rule_scout(codelet): assert workspace.number_of_unreplaced_objects() == 0 changed_objects =", "o in workspace.modified.objects if isinstance(o, Letter) and o.left_index == position ] letter_of_modified_string =", "initial_in_objects = object_from_initial in workspace.objects target_in_objects = object_from_target in workspace.objects assert initial_in_objects or", "to break {name}: Fizzle\") return False logging.info(f\"no incompatible {name}\") return True def __slippability(concept_mappings):", "return # check to see if all objects are still there for o", "1.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.4: distribution = [1.0, 2.0,", "{source} to {category.name}\") bond_facet = None # find leftmost object in group with", "False logging.info(f\"no incompatible {name}\") return True def __slippability(concept_mappings): for mapping in concept_mappings: slippiness", ".slipnet import slipnet from .workspace_formulas import choose_bond_facet from .workspace_formulas import choose_directed_neighbor from .workspace_formulas", "distingushing descriptors if changed.correspondence: target_object = changed.correspondence.object_from_target new_list = [] slippages = workspace.slippages()", "coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def description_strength_tester(codelet): description = codelet.arguments[0] description.descriptor.buffer =", "object_from_target = choose_unmodified_object( \"inter_string_salience\", target_candidates ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get the", "None, None, None, codelet) changed = changed_objects[-1] # generate a list of distinguishing", "{name}: Fizzle\") return False logging.info(f\"no incompatible {name}\") return True def __slippability(concept_mappings): for mapping", "100.0 bond.destination_descriptor.buffer = 100.0 logging.info(\"succeeded: 
posting bond-builder\") coderack.new_codelet(\"bond-builder\", codelet, strength) def bond_builder(codelet): bond", "in incompatibles: incompatible.break_the_structure() # break incompatible group and bond if they exist if", "corresponds to another object in the workspace # object_list = the union of", "True def __fight_incompatibles( incompatibles, structure, name, structure_weight, incompatible_weight ): if len(incompatibles): if __fight(structure,", "if not first_bond or first_bond.category != category: # check the other side of", "# find leftmost object & the highest group to which it belongs leftmost", "be flipped opposites = [ m for m in distinguishing_mappings if m.initial_description_type ==", "incompatible groups # fight all groups containing these objects incompatible_groups = group.get_incompatible_groups() assert", "want_flip: flipper = object_from_target.flipped_version() target_not_flipped = not workspace.target.equivalent_group(flipper) else: target_not_flipped = False initial_in_objects", "workspace.objects # check to see if bonds are there of the same direction", "import WorkspaceObject # some methods common to the codelets def __show_which_string_object_is_from(structure): if not", "workspace.initial.intra_string_unhappiness target_unhappiness = workspace.target.intra_string_unhappiness logging.info( f\"initial : relevance = {initial_relevance}, \" f\"unhappiness =", "assert random.random() <= probability # it is strong enough - post builder &", "if not structure: return \"unstructured\" if isinstance(structure, WorkspaceObject): return \"target\" if structure.string ==", "and not first_bond.direction_category: direction = None if not first_bond or first_bond.direction_category != direction:", "modified_ascii if abs(diff) < 2: relations = {0: slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor}", "flip_target_object = True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, 
codelet, ) def correspondence_strength_tester(codelet): correspondence", "destination.right_bond.facet direction = source.right_bond.direction_category destination = destination.right_bond.right_object search = True assert destination !=", "1.0, 1.0) # fight all incompatible correspondences incompatible_correspondences = [] if bond.left_object.leftmost or", "direction = slipnet.right if direction == slipnet.left: first_bond = source.left_bond else: first_bond =", "assert __fight_incompatibles(incompatible_groups, bond, \"groups\", 1.0, 1.0) # fight all incompatible correspondences incompatible_correspondences =", "assert category source = __get_scout_source( category, formulas.local_bond_category_relevance, \"group\" ) assert source assert not", "source.right_bond if not first_bond or first_bond.category != category: if category == slipnet.sameness and", "): object_from_target = object_from_target.flipped_version() concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) flip_target_object", "letter_of_initial_string logging.info(\"building replacement\") def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category = codelet.arguments[0] source = __get_scout_source( category,", "use conceptual depth to choose a relation value_list = [] for node in", "not destination.right_bond: continue if destination.right_bond.category != category: continue if destination.right_bond.direction_category != direction: if", "while search: search = False if not destination.right_bond: continue if destination.right_bond.category != category:", "source = __get_scout_source( category, formulas.local_bond_category_relevance, \"bond\" ) destination = choose_neighbour(source) logging.info(f\"source: {source}, destination:", "= bond_descriptors logging.info(f\"source descriptor: {source_descriptor.name.upper()}\") 
logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\") category = source_descriptor.get_bond_category(destination_descriptor) assert category", "* 10.0 assert cutoff >= formulas.actual_temperature if workspace.rule.build_translated_rule(): workspace.found_answer = True else: temperature.clamp_time", "initial_in_objects or ( not target_in_objects and (not (want_flip and target_not_flipped)) ) if correspondence.reflexive():", "assert concept_mappings assert __slippability(concept_mappings) # find out if any are distinguishing distinguishing_mappings =", "incompatibles: if not __structure_versus_structure( structure, structure_weight, incompatible, incompatible_weight ): logging.info(f\"lost fight with {incompatible}\")", "False initial_in_objects = object_from_initial in workspace.objects target_in_objects = object_from_target in workspace.objects assert initial_in_objects", "__all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation != 100.0 ): object_from_target = object_from_target.flipped_version() concept_mappings = formulas.get_mappings( object_from_initial,", "in sliplinks ] i = formulas.select_list_position(values) chosen = sliplinks[i] chosen_property = chosen.destination coderack.propose_description(", "weighted_strength1 = formulas.temperature_adjusted_value( structure1.total_strength * weight1 ) weighted_strength2 = formulas.temperature_adjusted_value( structure2.total_strength * weight2", "bonds = possible_group_bonds(category, direction_category, bond_facet, bonds) assert bonds group_category = category.get_related_node(slipnet.group_category) coderack.propose_group( objects,", "source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category = slipnet.sameness logging.info(f\"proposing {category.name} bond", "in correspondence.concept_mappings: if mapping.label: mapping.label.buffer = 100.0 if not mapping.is_contained_by(existing.concept_mappings): 
existing.concept_mappings += [mapping]", "the incompatible bond incompatible_bond = correspondence.get_incompatible_bond() if incompatible_bond: # bond found - fight", "group.bond_list, group.group_category, group.direction_category, group.facet, codelet, ) return bonds = [] objects = [leftmost]", "assert destination_descriptor return source_descriptor, destination_descriptor def __all_opposite_mappings(mappings): return len([m for m in mappings", "= source while search: search = False if not destination.right_bond: continue if destination.right_bond.category", "150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.2: distribution =", "source.string, slipnet.sameness_group, None, slipnet.letter_category, [source], [], ) probability = group.single_letter_group_probability() assert random.random() >=", "[ sliplink.degree_of_association() * sliplink.destination.activation for sliplink in sliplinks ] i = formulas.select_list_position(values) chosen", "= objekt # if incompatible bonds exist - fight group.update_strength() assert __fight_incompatibles(incompatible_bonds, group,", "workspace.target.objects: for description in objekt.relevant_descriptions(): if description.descriptor == initial_descriptor: target_candidates += [objekt] assert", "more_letters = [ o for o in workspace.modified.objects if isinstance(o, Letter) and o.left_index", "0.6: distribution = [2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0]", "10.0 assert cutoff >= formulas.actual_temperature if workspace.rule.build_translated_rule(): workspace.found_answer = True else: temperature.clamp_time =", "= group.object_list[i - 1] object2 = group.object_list[i] if not object1.right_bond: if group.direction_category ==", ".workspace_formulas import choose_neighbour from .workspace_formulas import choose_unmodified_object from .workspace_formulas import workspace from .workspace_object", "= formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) 
descriptor = object_list[i] # choose", "object1 category = group.group_category.get_related_node(slipnet.bond_category) facet = group.facet new_bond = Bond( source, destination, category,", "it assert __structure_versus_structure( correspondence, 3.0, incompatible_bond, 2.0 ) # won against incompatible bond", "in initial string letters = [o for o in workspace.initial.objects if isinstance(o, Letter)]", "workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) descriptions = chosen_object.get_possible_descriptions(description_type) assert descriptions values = [n.activation for", "= node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) relation =", "category = chosen_bond.category direction_category = chosen_bond.direction_category bond_facet = chosen_bond.facet bonds = possible_group_bonds(category, direction_category,", "bond \") coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def rule_scout(codelet):", "2.0 ) # won against incompatible bond incompatible_group = target.group if incompatible_group: assert", "while leftmost.group and leftmost.group.bond_category == slipnet.sameness: leftmost = leftmost.group if leftmost.spans_string(): # the", "if they exist if incompatible_bond: incompatible_bond.break_the_structure() if incompatible_group: incompatible_group.break_the_structure() if incompatible_rule: workspace.break_rule() correspondence.build_correspondence()", "letter_of_initial_string.replacement: logging.info( f\"Replacement already found for {letter_of_initial_string}, so fizzling\" ) return position =", "bond.destination in workspace.objects for string_bond in bond.string.bonds: if bond.same_neighbours(string_bond) and bond.same_categories(string_bond): if bond.direction_category:", "first_bond or first_bond.direction_category != direction: if mydirection == 
slipnet.right: first_bond = source.left_bond else:", "= destination.right_bond.right_object search = True assert destination != source logging.info(f\"proposing group from {source}", "== group.direction_category: continue incompatible_bonds += [right_bond] next_object = objekt # if incompatible bonds", "True assert destination != source logging.info(f\"proposing group from {source} to {destination}\") objects =", "object corresponds to another object in the workspace # object_list = the union", "left_bond.left_object == previous: continue if left_bond.direction_category == group.direction_category: continue incompatible_bonds += [left_bond] previous", "chosen_object __show_which_string_object_is_from(chosen_object) descriptions = chosen_object.get_possible_descriptions(description_type) assert descriptions values = [n.activation for n in", "initial_descriptor = slipnode for mapping in workspace.slippages(): if mapping.initial_descriptor == slipnode: initial_descriptor =", "{int(initial_unhappiness)}\" ) logging.info( f\"target : relevance = {target_relevance}, \" f\"unhappiness = {int(target_unhappiness)}\" )", "mappings # and add new ones to the existing corr. 
existing = correspondence.object_from_initial.correspondence", "workspace.found_answer = True else: temperature.clamp_time = coderack.codelets_run + 100 temperature.clamped = True formulas.Temperature", "against all correspondences if incompatibles: correspondence_spans = ( correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span() ) for", "not len(other_objects_of_same_letter): object_list += [letter] # if this object corresponds to another object", "description.object.described(description.descriptor): description.description_type.buffer = 100.0 description.descriptor.buffer = 100.0 else: description.build() def bottom_up_bond_scout(codelet): source =", "probability = formulas.temperature_adjusted_probability(strength / 100.0) assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet, strength) def description_builder(codelet): description", "choose a relation value_list = [] for node in object_list: depth = node.conceptual_depth", "__get_cut_off(density): if density > 0.8: distribution = [5.0, 150.0, 5.0, 2.0, 1.0, 1.0,", "both objects span the strings, check to see if the # string description", "group.bond_list += [object1.right_bond] for incompatible in incompatible_groups: incompatible.break_the_structure() group.build_group() group.activate_descriptions() logging.info(\"building group\") def", "2.0, incompatible_correspondences, 3.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() for incompatible in incompatible_groups: incompatible.break_the_structure()", "incompatible, incompatible_spans ) incompatible_bond = None incompatible_group = None # if there is", "source = object1 destination = object2 else: source = object2 destination = object1", ") descriptors = object_from_initial.relevant_distinguishing_descriptors() slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode initial_descriptor = slipnode for", "if not first_bond: 
logging.info(\"no first_bond\") else: logging.info(f\"first_bond: {first_bond}\") if first_bond and not first_bond.direction_category:", "group.object_list, group.bond_list, group.group_category, group.direction_category, group.facet, codelet, ) return bonds = [] objects =", "random bond from list chosen_bond = random.choice(bonds) category = chosen_bond.category direction_category = chosen_bond.direction_category", "choose_directed_neighbor(source, direction) assert destination logging.info(f\"to object: {destination}\") bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor", "if source.left_bond.category != category: continue if source.left_bond.direction_category != direction: if source.left_bond.direction_category: continue if", "i in range(1, len(group.object_list)): object1 = group.object_list[i - 1] object2 = group.object_list[i] if", "None assert first_bond assert first_bond.direction_category == direction logging.info(f\"possible group: {first_bond}\") category = first_bond.category", "+ incompatible.object_from_target.letter_span() ) assert __structure_versus_structure( correspondence, correspondence_spans, incompatible, incompatible_spans ) incompatible_bond = None", "category: # check the other side of object if direction == slipnet.right: first_bond", "{source} to {destination}\") objects = [source] bonds = [] while source != destination:", "the letmost object to \"successor\" or \"d\" object_list = [] if changed.replacement.relation: object_list", "1.0 cutoff = __get_cut_off(bond_density) * 10.0 assert cutoff >= formulas.actual_temperature if workspace.rule.build_translated_rule(): workspace.found_answer", "previous = group.object_list[0] for objekt in group.object_list[1:]: left_bond = objekt.left_bond if left_bond: if", "structure in break_objects: structure.break_the_structure() def bottom_up_description_scout(codelet): chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert 
chosen_object __show_which_string_object_is_from(chosen_object)", "100.0 assert not formulas.coin_flip(probability_of_fizzle) # choose a structure at random structures = [", "incompatibles: incompatible.break_the_structure() # break incompatible group and bond if they exist if incompatible_bond:", "bond_density = number_of_bonds / nearly_total_length if bond_density > 1.0: bond_density = 1.0 cutoff", "nearly_total_length if bond_density > 1.0: bond_density = 1.0 cutoff = __get_cut_off(bond_density) * 10.0", "leftmost object & the highest group to which it belongs leftmost = None", ".workspace_formulas import choose_bond_facet from .workspace_formulas import choose_directed_neighbor from .workspace_formulas import choose_neighbour from .workspace_formulas", "codelet.arguments[0] category = group_category.get_related_node(slipnet.bond_category) assert category source = __get_scout_source( category, formulas.local_bond_category_relevance, \"group\" )", "bond_builder(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() assert bond.source in workspace.objects or bond.destination in", "fight against it initial = correspondence.object_from_initial target = correspondence.object_from_target if initial.leftmost or initial.rightmost", ") object_from_target = choose_unmodified_object( \"inter_string_salience\", workspace.target.objects ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get", "+= [value] i = formulas.select_list_position(value_list) descriptor = object_list[i] # choose the relation (change", "object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) flip_target_object = True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object,", "leftmost = leftmost.right_bond.right_object objects += [leftmost] assert leftmost.rightmost # choose a random bond", 
"source, destination): source_descriptor = source.get_descriptor(bond_facet) destination_descriptor = destination.get_descriptor(bond_facet) assert source_descriptor assert destination_descriptor return", "slipnet.right: source = object1 destination = object2 else: source = object2 destination =", "1: slipnet.predecessor} relation = relations[diff] logging.info(f\"Relation found: {relation.name}\") else: relation = None logging.info(\"no", "if not first_bond or first_bond.direction_category != direction: if mydirection == slipnet.right: first_bond =", "existing.concept_mappings += [mapping] return incompatibles = correspondence.get_incompatible_correspondences() # fight against all correspondences if", "if not __structure_versus_structure( structure, structure_weight, incompatible, incompatible_weight ): logging.info(f\"lost fight with {incompatible}\") return", "logging.info(f\"trying from {source} to {category.name}\") bond_facet = None # find leftmost object in", "incompatibles, incompatible_weight): logging.info(f\"broke the {name}\") return True logging.info(f\"failed to break {name}: Fizzle\") return", "direction = None if not first_bond or first_bond.direction_category != direction: if mydirection ==", "is an incompatible bond then fight against it initial = correspondence.object_from_initial target =", "choose_bond_facet(source, destination) assert bond_facet return bond_facet def __get_descriptors(bond_facet, source, destination): source_descriptor = source.get_descriptor(bond_facet)", "string selected: {workspace.initial}\") # find leftmost object & the highest group to which", "= correspondence.object_from_target assert object_from_initial in workspace.objects assert ( object_from_target in workspace.objects or correspondence.flip_target_object", "objects = [source] bonds = [] while source != destination: bonds += [source.right_bond]", "category == slipnet.identity: category = slipnet.sameness logging.info(f\"proposing {category.name} bond \") 
coderack.propose_bond( source, destination,", "1.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.6: distribution = [2.0, 5.0,", "2 bond_density = number_of_bonds / nearly_total_length if bond_density > 1.0: bond_density = 1.0", "if randomized > initials: string = workspace.target logging.info(f\"target string selected: {workspace.target} for {type_name}\")", "3.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() for incompatible in incompatible_groups: incompatible.break_the_structure() for incompatible", "to break incompatible correspondences\") assert __fight(bond, 2.0, incompatible_correspondences, 3.0) for incompatible in incompatible_bonds:", "def important_object_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"relative_importance\", workspace.initial.objects ) descriptors = object_from_initial.relevant_distinguishing_descriptors() slipnode =", "if initial.leftmost or initial.rightmost and target.leftmost or target.rightmost: # search for the incompatible", "def bond_strength_tester(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() strength = bond.total_strength probability = formulas.temperature_adjusted_probability(strength", "bond, \"bonds\", 1.0, 1.0) incompatible_groups = bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups, bond, \"groups\", 1.0, 1.0)", "found\") letter_of_initial_string.replacement = Replacement( letter_of_initial_string, letter_of_modified_string, relation ) if relation != slipnet.sameness: letter_of_initial_string.changed", "in incompatible_groups: incompatible.break_the_structure() for incompatible in incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building bond {bond}\") bond.build_bond() #", "forward_bond == slipnet.identity: forward_bond = slipnet.sameness backward_bond = slipnet.sameness else: backward_bond = 
destination_descriptor.get_bond_category(source_descriptor)", "WorkspaceObject): return \"target\" if structure.string == workspace.initial: return \"initial\" return \"other\" def __get_scout_source(slipnode,", "incompatible in incompatible_bonds: incompatible.break_the_structure() for incompatible in incompatible_groups: incompatible.break_the_structure() for incompatible in incompatible_correspondences:", "+ unhappinesses) initials = initial_relevance + initial_unhappiness if randomized > initials: string =", "> 0.6: distribution = [2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0,", "None incompatible_group = None # if there is an incompatible bond then fight", "other_objects_of_same_letter = [ o for o in workspace.initial.objects if not o != changed", "# generate a list of distinguishing descriptions for the first object # ie.", "logging.info(\"no first_bond2\") else: logging.info(f\"first_bond2: {first_bond}\") if first_bond and not first_bond.direction_category: direction = None", "assert description sliplinks = formulas.similar_property_links(description.descriptor) assert sliplinks values = [ sliplink.degree_of_association() * sliplink.destination.activation", "== group.direction_category: continue incompatible_bonds += [left_bond] previous = objekt next_object = group.object_list[-1] for", "break incompatible group and bond if they exist if incompatible_bond: incompatible_bond.break_the_structure() if incompatible_group:", "destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination ) forward_bond = source_descriptor.get_bond_category(destination_descriptor) if", "return len([m for m in mappings if m.label != slipnet.opposite]) == 0 def", ".replacement import Replacement from .slipnet import slipnet from .workspace_formulas import choose_bond_facet from .workspace_formulas", "initial string = {letter_of_initial_string}\") if letter_of_initial_string.replacement: logging.info( f\"Replacement already found for 
{letter_of_initial_string}, so", "object_from_target.relevant_descriptions(), ) flip_target_object = True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def", ") def rule_strength_tester(codelet): rule = codelet.arguments[0] rule.update_strength() probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0) assert", "or whole) or letter category # if it is the only one of", "in group with these bonds search = True while search: search = False", "= not workspace.target.equivalent_group(flipper) else: target_not_flipped = False initial_in_objects = object_from_initial in workspace.objects target_in_objects", "__all_opposite_mappings(mappings): return len([m for m in mappings if m.label != slipnet.opposite]) == 0", "{bond}\") bond.build_bond() # pylint: disable=too-many-branches # pylint: disable=too-many-statements def top_down_group_scout__category(codelet): group_category = codelet.arguments[0]", "direction = None assert first_bond assert first_bond.direction_category == direction logging.info(f\"possible group: {first_bond}\") category", "bonds are there of the same direction incompatible_bonds = [] # incompatible bond", "the only one of its type in the string object_list = [] position", "= [] for node in object_list: depth = node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list", "incompatible bonds exist - fight group.update_strength() assert __fight_incompatibles(incompatible_bonds, group, \"bonds\", 1.0, 1.0) #", "slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond2\")", "formulas.coin_flip(break_probability): return for structure in break_objects: structure.break_the_structure() def bottom_up_description_scout(codelet): chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects)", "changed = changed_objects[-1] # generate a list of 
distinguishing descriptions for the first", "if description.object.described(description.descriptor): description.description_type.buffer = 100.0 description.descriptor.buffer = 100.0 else: description.build() def bottom_up_bond_scout(codelet): source", "possible_group_bonds(category, direction_category, bond_facet, bonds) assert bonds group_category = category.get_related_node(slipnet.group_category) coderack.propose_group( objects, bonds, group_category,", "bond.string.bonds: if bond.same_neighbours(string_bond) and bond.same_categories(string_bond): if bond.direction_category: bond.direction_category.buffer = 100.0 bond.category.buffer = 100.0", "# search for the incompatible bond incompatible_bond = correspondence.get_incompatible_bond() if incompatible_bond: # bond", "from {source} to {destination}\") objects = [source] bonds = [] while source !=", "Group from .letter import Letter from .replacement import Replacement from .slipnet import slipnet", "or bond.destination in workspace.objects for string_bond in bond.string.bonds: if bond.same_neighbours(string_bond) and bond.same_categories(string_bond): if", "= target.group if incompatible_group: assert __structure_versus_structure( correspondence, 1.0, incompatible_group, 1.0 ) # if", "next_object = group.object_list[-1] for objekt in reversed(group.object_list[:-1]): right_bond = objekt.right_bond if right_bond: if", "not first_bond.direction_category: direction = None if not first_bond or first_bond.direction_category != direction: if", "codelet, ) def top_down_bond_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"bond\"", "and object_from_target.spans_string() and slipnet.direction_category in initial_description_types and __all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation != 100.0 ):", "source_descriptor assert destination_descriptor return source_descriptor, 
destination_descriptor def __all_opposite_mappings(mappings): return len([m for m in", "all correspondences if incompatibles: correspondence_spans = ( correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span() ) for incompatible", "[slipnet.right.activation] if not formulas.select_list_position(activations): direction = slipnet.left else: direction = slipnet.right if direction", "codelet ) def top_down_description_scout(codelet): description_type = codelet.arguments[0] chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object", "activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): mydirection = slipnet.left else:", "bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() assert bond.source in workspace.objects or bond.destination in workspace.objects", "Correspondence)) ] assert structures structure = random.choice(structures) __show_which_string_object_is_from(structure) break_objects = [structure] if isinstance(structure,", "[slipnet.right.activation] if not formulas.select_list_position(activations): mydirection = slipnet.left else: mydirection = slipnet.right if mydirection", "1.0, 1.0) # fight incompatible groups # fight all groups containing these objects", "destination_descriptor, codelet, ) def rule_scout(codelet): assert workspace.number_of_unreplaced_objects() == 0 changed_objects = [o for", "/ 100.0) logging.info(f\"bond strength = {strength} for {bond}\") assert formulas.coin_flip(probability) bond.facet.buffer = 100.0", "destination = choose_neighbour(source) assert destination logging.info(f\"destination: {destination}\") bond_facet = __get_bond_facet(source, destination) logging.info(f\"chosen bond", "2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0] stop = sum(distribution) * random.random() total", "descriptors if changed.correspondence: target_object = 
changed.correspondence.object_from_target new_list = [] slippages = workspace.slippages() for", "category, bond_facet, destination_descriptor, source_descriptor, codelet, ) def top_down_bond_scout__direction(codelet): direction = codelet.arguments[0] source =", "letter = changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter = [ o for o in workspace.initial.objects if not", "__get_scout_source( category, formulas.local_bond_category_relevance, \"bond\" ) destination = choose_neighbour(source) logging.info(f\"source: {source}, destination: {destination}\") assert", "weighted_strength2) * random.random() logging.info(f\"{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}\") return weighted_strength1 > rhs", "if structure.source.group: if structure.source.group == structure.destination.group: break_objects += [structure.source.group] # try to break", "0.0 for i in range(0, len(distribution)): total += distribution[i] if total >= stop:", "/ 100.0) assert random.random() <= probability coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength, rule) def replacement_finder(): #", "o in workspace.initial.objects if o.changed] # assert len(changed_objects) < 2 # if there", "destination = object2 else: source = object2 destination = object1 category = group.group_category.get_related_node(slipnet.bond_category)", "objects = [leftmost] while leftmost.right_bond: bonds += [leftmost.right_bond] leftmost = leftmost.right_bond.right_object objects +=", "[leftmost.right_bond] leftmost = leftmost.right_bond.right_object objects += [leftmost] assert leftmost.rightmost # choose a random", "from .slipnet import slipnet from .workspace_formulas import choose_bond_facet from .workspace_formulas import choose_directed_neighbor from", "incompatible_groups: incompatible.break_the_structure() for incompatible in incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building bond {bond}\") bond.build_bond() # pylint:", "return 
coderack.propose_rule(None, None, None, None, codelet) changed = changed_objects[-1] # generate a list", "formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode initial_descriptor = slipnode for mapping in workspace.slippages(): if mapping.initial_descriptor ==", "== destination.right_bond.facet: bond_facet = destination.right_bond.facet direction = source.right_bond.direction_category destination = destination.right_bond.right_object search =", "formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) flip_target_object = True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings,", "or first_bond.direction_category != direction: if mydirection == slipnet.right: first_bond = source.left_bond else: first_bond", "find rightmost object in group with these bonds search = True destination =", "else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): direction = slipnet.left", "elif density > 0.4: distribution = [1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0,", "bond.same_neighbours(string_bond) and bond.same_categories(string_bond): if bond.direction_category: bond.direction_category.buffer = 100.0 bond.category.buffer = 100.0 logging.info(\"already exists:", "for mapping in workspace.slippages(): if mapping.initial_descriptor == slipnode: initial_descriptor = mapping.target_descriptor target_candidates =", "= None if workspace.rule: if workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule = workspace.rule assert __structure_versus_structure( correspondence, 1.0,", "+ target_relevance unhappinesses = initial_unhappiness + target_unhappiness randomized = random.random() * (relevances +", "to {destination}\") objects = [source] bonds = [] while source != destination: bonds", "mapping.label: mapping.label.buffer = 100.0 
if not mapping.is_contained_by(existing.concept_mappings): existing.concept_mappings += [mapping] return incompatibles =", "__show_which_string_object_is_from(chosen_object) descriptions = chosen_object.get_possible_descriptions(description_type) assert descriptions values = [n.activation for n in descriptions]", "= codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() assert bond.source in workspace.objects or bond.destination in workspace.objects for", "to see if bonds are there of the same direction incompatible_bonds = []", "+ 100 temperature.clamped = True formulas.Temperature = 100.0 def bottom_up_correspondence_scout(codelet): object_from_initial = choose_unmodified_object(", "incompatible.break_the_structure() group.build_group() group.activate_descriptions() logging.info(\"building group\") def rule_builder(codelet): rule = codelet.arguments[0] if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions()", ") string = workspace.initial relevances = initial_relevance + target_relevance unhappinesses = initial_unhappiness +", "to \"successor\" or \"d\" object_list = [] if changed.replacement.relation: object_list += [changed.replacement.relation] object_list", "def top_down_group_scout__category(codelet): group_category = codelet.arguments[0] category = group_category.get_related_node(slipnet.bond_category) assert category source = __get_scout_source(", "150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.6:", "workspace.initial.objects ) descriptors = object_from_initial.relevant_distinguishing_descriptors() slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode initial_descriptor = slipnode", "= {int(target_unhappiness)}\" ) string = workspace.initial relevances = initial_relevance + target_relevance unhappinesses =", "descriptors & Fizzle\") return incompatible_bonds = bond.get_incompatible_bonds() logging.info(f\"number of incompatible_bonds: {len(incompatible_bonds)}\") if 
len(incompatible_bonds):", "logging.info(f\"possible group: {first_bond}\") category = first_bond.category assert category group_category = category.get_related_node(slipnet.group_category) logging.info(f\"trying from", "source.left_bond.direction_category: continue if not bond_facet or bond_facet == source.left_bond.facet: bond_facet = source.left_bond.facet direction", "objekt.leftmost: leftmost = objekt while leftmost.group and leftmost.group.bond_category == slipnet.sameness: leftmost = leftmost.group", "assert __structure_versus_structure( correspondence, correspondence_spans, incompatible, incompatible_spans ) incompatible_bond = None incompatible_group = None", "in concept_mappings if m.distinguishing()] assert distinguishing_mappings # if both objects span the strings,", "= [] while source != destination: bonds += [source.right_bond] objects += [source.right_bond.right_object] source", "not o != changed and o.get_description_type(letter) ] if not len(other_objects_of_same_letter): object_list += [letter]", "{bond_facet.get_name()}\") logging.info(f\"Source: {source}, destination: {destination}\") bond_descriptors = __get_descriptors(bond_facet, source, destination) source_descriptor, destination_descriptor =", "[letter] # if this object corresponds to another object in the workspace #", "distinguishing_mappings if m.initial_description_type == slipnet.string_position_category and m.initial_description_type != slipnet.bond_facet ] initial_description_types = [m.initial_description_type", "bond.direction_category: incompatible_correspondences = bond.get_incompatible_correspondences() if incompatible_correspondences: logging.info(\"trying to break incompatible correspondences\") assert __fight(bond,", "else: first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond\") else: logging.info(f\"first_bond: {first_bond}\") if", "is an incompatible rule, fight against it incompatible_rule = None if workspace.rule: if", "in 
workspace.initial.objects if not o != changed and o.get_description_type(letter) ] if not len(other_objects_of_same_letter):", "= [ s for s in workspace.structures if isinstance(s, (Group, Bond, Correspondence)) ]", "= letter_of_initial_string logging.info(\"building replacement\") def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category = codelet.arguments[0] source = __get_scout_source(", "choose_neighbour(source) logging.info(f\"source: {source}, destination: {destination}\") assert destination bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor", "probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0) assert random.random() <= probability coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength, rule)", "correspondence.flip_target_object if want_flip: flipper = object_from_target.flipped_version() target_not_flipped = not workspace.target.equivalent_group(flipper) else: target_not_flipped =", "100.0 bond.category.buffer = 100.0 logging.info(\"already exists: activate descriptors & Fizzle\") return incompatible_bonds =", "random from . import formulas from . 
import temperature from .bond import Bond", "- fight group.update_strength() assert __fight_incompatibles(incompatible_bonds, group, \"bonds\", 1.0, 1.0) # fight incompatible groups", "o for o in workspace.initial.objects if not o != changed and o.get_description_type(letter) ]", "# assert len(changed_objects) < 2 # if there are no changed objects, propose", "bond.direction_category: bond.direction_category.buffer = 100.0 bond.category.buffer = 100.0 logging.info(\"already exists: activate descriptors & Fizzle\")", "object_from_target, concept_mappings, flip_target_object, codelet, ) def correspondence_strength_tester(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial", "else: target_not_flipped = False initial_in_objects = object_from_initial in workspace.objects target_in_objects = object_from_target in", "= 100.0 mapping.target_description_type.buffer = 100.0 mapping.target_descriptor.buffer = 100.0 coderack.new_codelet(\"correspondence-builder\", codelet, strength, correspondence) def", "target_in_objects = object_from_target in workspace.objects assert initial_in_objects or ( not target_in_objects and (not", "source.right_bond if not first_bond: logging.info(\"no first_bond\") else: logging.info(f\"first_bond: {first_bond}\") if first_bond and not", "def __fight_incompatibles( incompatibles, structure, name, structure_weight, incompatible_weight ): if len(incompatibles): if __fight(structure, structure_weight,", "group.update_strength() assert __fight_incompatibles(incompatible_bonds, group, \"bonds\", 1.0, 1.0) # fight incompatible groups # fight", "Bond from .bond import possible_group_bonds from .coderack import coderack from .correspondence import Correspondence", "object group = leftmost coderack.propose_group( group.object_list, group.bond_list, group.group_category, group.direction_category, group.facet, codelet, ) return", "destination = source search = True while search: search = False if not", 
"source.rightmost: direction = slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not", "> 0.8: distribution = [5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0,", "1.0] else: distribution = [1.0, 1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0,", "= [1.0, 1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0] stop =", "other side of object if direction == slipnet.right: first_bond = source.left_bond else: first_bond", "{destination}\") assert destination bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source,", "description.object in workspace.objects if description.object.described(description.descriptor): description.description_type.buffer = 100.0 description.descriptor.buffer = 100.0 else: description.build()", "workspace.initial.objects ) object_from_target = choose_unmodified_object( \"inter_string_salience\", workspace.target.objects ) assert object_from_initial.spans_string() == object_from_target.spans_string() #", "concept mappings # and add new ones to the existing corr. 
existing =", "= True workspace.changed_object = letter_of_initial_string logging.info(\"building replacement\") def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category = codelet.arguments[0]", "\"unstructured\" if isinstance(structure, WorkspaceObject): return \"target\" if structure.string == workspace.initial: return \"initial\" return", "= __get_scout_source( direction, formulas.local_direction_category_relevance, \"bond\" ) destination = choose_directed_neighbor(source, direction) assert destination logging.info(f\"to", "source, destination, category, facet, source.get_descriptor(facet), destination.get_descriptor(facet), ) new_bond.build_bond() group.bond_list += [object1.right_bond] for incompatible", "{category.name}\") bond_facet = None # find leftmost object in group with these bonds", "incompatible group and bond if they exist if incompatible_bond: incompatible_bond.break_the_structure() if incompatible_group: incompatible_group.break_the_structure()", "source = __get_scout_source( category, formulas.local_bond_category_relevance, \"group\" ) assert source assert not source.spans_string() if", "= object_from_target.flipped_version() target_not_flipped = not workspace.target.equivalent_group(flipper) else: target_not_flipped = False initial_in_objects = object_from_initial", "= slipnet.sameness else: backward_bond = destination_descriptor.get_bond_category(source_descriptor) assert category in [forward_bond, backward_bond] if category", ") def bond_strength_tester(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() strength = bond.total_strength probability =", "assert __fight(bond, 2.0, incompatible_correspondences, 3.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() for incompatible in", ") if relation != slipnet.sameness: letter_of_initial_string.changed = True workspace.changed_object = letter_of_initial_string 
logging.info(\"building replacement\")", "1.0 else: number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds) nearly_total_length = len(workspace.initial) + len(workspace.target) -", "if len(incompatibles): if __fight(structure, structure_weight, incompatibles, incompatible_weight): logging.info(f\"broke the {name}\") return True logging.info(f\"failed", "def correspondence_builder(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target want_flip =", "len(workspace.target) - 2 bond_density = number_of_bonds / nearly_total_length if bond_density > 1.0: bond_density", "first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond or first_bond.category !=", "update strength value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength() strength =", "object_from_target.relevant_descriptions(), ) assert concept_mappings assert __slippability(concept_mappings) # find out if any are distinguishing", "return True def __fight_incompatibles( incompatibles, structure, name, structure_weight, incompatible_weight ): if len(incompatibles): if", "return incompatible_bonds = bond.get_incompatible_bonds() logging.info(f\"number of incompatible_bonds: {len(incompatible_bonds)}\") if len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds,", "for incompatible in incompatible_groups: incompatible.break_the_structure() group.build_group() group.activate_descriptions() logging.info(\"building group\") def rule_builder(codelet): rule =", "True # find rightmost object in group with these bonds search = True", "object_from_target.flipped_version() concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) flip_target_object = 
True coderack.propose_correspondence(", "destination.right_bond.facet: bond_facet = destination.right_bond.facet direction = source.right_bond.direction_category destination = destination.right_bond.right_object search = True", "mapping.target_description_type.buffer = 100.0 mapping.target_descriptor.buffer = 100.0 coderack.new_codelet(\"correspondence-builder\", codelet, strength, correspondence) def correspondence_builder(codelet): correspondence", "not object1.right_bond: if group.direction_category == slipnet.right: source = object1 destination = object2 else:", "changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] # use conceptual depth to choose a relation value_list = []", "for the first object # ie. string-position (left-,right-most,middle or whole) or letter category", "destination ) forward_bond = source_descriptor.get_bond_category(destination_descriptor) if forward_bond == slipnet.identity: forward_bond = slipnet.sameness backward_bond", "incompatible, incompatible_weight ): logging.info(f\"lost fight with {incompatible}\") return False logging.info(f\"won fight with {incompatible}\")", "object_list = [] position = changed.get_descriptor(slipnet.string_position_category) if position: object_list += [position] letter =", "incompatible in incompatible_groups: incompatible.break_the_structure() group.build_group() group.activate_descriptions() logging.info(\"building group\") def rule_builder(codelet): rule = codelet.arguments[0]", "-1: slipnet.successor, 1: slipnet.predecessor} relation = relations[diff] logging.info(f\"Relation found: {relation.name}\") else: relation =", "probability_of_slippage = formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage): return True return False # start the actual", "string - propose this object group = leftmost coderack.propose_group( group.object_list, group.bond_list, group.group_category, group.direction_category,", "assert o in 
workspace.objects # check to see if bonds are there of", "= __get_descriptors(bond_facet, source, destination) source_descriptor, destination_descriptor = bond_descriptors logging.info(f\"source descriptor: {source_descriptor.name.upper()}\") logging.info(f\"destination descriptor:", "source.left_bond else: first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond\") else: logging.info(f\"first_bond: {first_bond}\")", "bonds = [] while source != destination: bonds += [source.right_bond] objects += [source.right_bond.right_object]", "= bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups, bond, \"groups\", 1.0, 1.0) # fight all incompatible correspondences", "Replacement from .slipnet import slipnet from .workspace_formulas import choose_bond_facet from .workspace_formulas import choose_directed_neighbor", "i = formulas.select_list_position(value_list) descriptor = object_list[i] # choose the relation (change the letmost", "selected: {workspace.target}\") else: logging.info(f\"initial string selected: {workspace.initial}\") # find leftmost object & the", "{target_relevance}, \" f\"unhappiness = {int(target_unhappiness)}\" ) string = workspace.initial relevances = initial_relevance +", "randomized = random.random() * (relevances + unhappinesses) initials = initial_relevance + initial_unhappiness if", "slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond or first_bond.category", "destination.right_bond.right_object search = True assert destination != source objects = [source] bonds =", "= {initial_relevance}, \" f\"unhappiness = {int(initial_unhappiness)}\" ) logging.info( f\"target : relevance = {target_relevance},", "+= [position] letter = changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter = [ o for o in workspace.initial.objects", "check to see if bonds are there of the same direction incompatible_bonds =", "slipnet.opposite]) == 
0 def __structure_versus_structure(structure1, weight1, structure2, weight2): structure1.update_strength() structure2.update_strength() weighted_strength1 = formulas.temperature_adjusted_value(", "== slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond or", "bond.update_strength() assert bond.source in workspace.objects or bond.destination in workspace.objects for string_bond in bond.string.bonds:", "correspondences if incompatibles: correspondence_spans = ( correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span() ) for incompatible in", "return False logging.info(f\"no incompatible {name}\") return True def __slippability(concept_mappings): for mapping in concept_mappings:", "the string - propose this object group = leftmost coderack.propose_group( group.object_list, group.bond_list, group.group_category,", "existing = correspondence.object_from_initial.correspondence for mapping in correspondence.concept_mappings: if mapping.label: mapping.label.buffer = 100.0 if", "else: coderack.propose_bond( destination, source, category, bond_facet, destination_descriptor, source_descriptor, codelet, ) def top_down_bond_scout__direction(codelet): direction", "correspondence.object_from_target assert object_from_initial in workspace.objects assert ( object_from_target in workspace.objects or correspondence.flip_target_object and", "in workspace.objects or correspondence.flip_target_object and not workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength() strength = correspondence.total_strength probability", "logging.info(f\"to object: {destination}\") bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source,", "destination, category, facet, source.get_descriptor(facet), destination.get_descriptor(facet), ) new_bond.build_bond() group.bond_list += [object1.right_bond] 
for incompatible in", "node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) relation = object_list[i]", "in descriptions] i = formulas.select_list_position(values) chosen_property = descriptions[i] coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet", "total = 0.0 for i in range(0, len(distribution)): total += distribution[i] if total", "bond.destination_descriptor.buffer = 100.0 logging.info(\"succeeded: posting bond-builder\") coderack.new_codelet(\"bond-builder\", codelet, strength) def bond_builder(codelet): bond =", "probability # it is strong enough - post builder & activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer", "source.right_bond.direction_category destination = destination.right_bond.right_object search = True assert destination != source logging.info(f\"proposing group", "probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability # it is strong", "source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"direction\" ) logging.info(f\"source chosen = {source}\") assert not", "else: first_bond = source.right_bond if not first_bond or first_bond.category != category: if category", "bonds, group_category, direction_category, bond_facet, codelet ) def group_strength_tester(codelet): # update strength value of", "unhappinesses = initial_unhappiness + target_unhappiness randomized = random.random() * (relevances + unhappinesses) initials", "bond_descriptors = __get_descriptors(bond_facet, source, destination) source_descriptor, destination_descriptor = bond_descriptors logging.info(f\"source descriptor: {source_descriptor.name.upper()}\") logging.info(f\"destination", "[ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] # use conceptual depth to choose a relation value_list =", 
"first_bond.direction_category search = True bond_facet = None # find leftmost object in group", "= True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def important_object_correspondence_scout(codelet): object_from_initial =", "assert source_descriptor assert destination_descriptor return source_descriptor, destination_descriptor def __all_opposite_mappings(mappings): return len([m for m", "= random.choice(structures) __show_which_string_object_is_from(structure) break_objects = [structure] if isinstance(structure, Bond): if structure.source.group: if structure.source.group", "direction, formulas.local_direction_category_relevance, \"bond\" ) destination = choose_directed_neighbor(source, direction) assert destination logging.info(f\"to object: {destination}\")", "bond_facet = None # find leftmost object in group with these bonds while", "chosen_bond.direction_category bond_facet = chosen_bond.facet bonds = possible_group_bonds(category, direction_category, bond_facet, bonds) assert bonds group_category", "bottom_up_description_scout(codelet): chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) description = formulas.choose_relevant_description_by_activation(chosen_object) assert description", "\"successor\" or \"d\" object_list = [] if changed.replacement.relation: object_list += [changed.replacement.relation] object_list +=", "category source = __get_scout_source( category, formulas.local_bond_category_relevance, \"group\" ) assert source assert not source.spans_string()", "destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) else: coderack.propose_bond( destination, source, category, bond_facet,", "!= slipnet.bond_facet ] initial_description_types = [m.initial_description_type for m in opposites] flip_target_object = False", "slipnet.opposite.activation 
!= 100.0 ): object_from_target = object_from_target.flipped_version() concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(),", "target.group if incompatible_group: assert __structure_versus_structure( correspondence, 1.0, incompatible_group, 1.0 ) # if there", "object # ie. string-position (left-,right-most,middle or whole) or letter category # if it", "= codelet.arguments[0] source = __get_scout_source( category, formulas.local_bond_category_relevance, \"bond\" ) destination = choose_neighbour(source) logging.info(f\"source:", "the highest group to which it belongs leftmost = None for objekt in", "rule.update_strength() assert rule.total_strength # fight against other rules if workspace.rule: assert __structure_versus_structure(rule, 1.0,", "100.0) assert random.random() <= probability # activate some concepts for mapping in correspondence.concept_mappings:", "out if any are distinguishing distinguishing_mappings = [m for m in concept_mappings if", "= False initial_in_objects = object_from_initial in workspace.objects target_in_objects = object_from_target in workspace.objects assert", "import possible_group_bonds from .coderack import coderack from .correspondence import Correspondence from .group import", "value = formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) descriptor = object_list[i] #", "return direction = first_bond.direction_category search = True bond_facet = None # find leftmost", "and leftmost.group.bond_category == slipnet.sameness: leftmost = leftmost.group if leftmost.spans_string(): # the object already", "if workspace.rule: assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0) workspace.build_rule(rule) def __get_cut_off(density): if density >", "def bottom_up_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects ) 
object_from_target = choose_unmodified_object( \"inter_string_salience\", workspace.target.objects", "incompatible_spans = ( incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span() ) assert __structure_versus_structure( correspondence, correspondence_spans, incompatible, incompatible_spans", "logging.info(f\"building bond {bond}\") bond.build_bond() # pylint: disable=too-many-branches # pylint: disable=too-many-statements def top_down_group_scout__category(codelet): group_category", "for n in descriptions] i = formulas.select_list_position(values) chosen_property = descriptions[i] coderack.propose_description( chosen_object, chosen_property.category(),", "object & the highest group to which it belongs leftmost = None for", "rule, fight against it incompatible_rule = None if workspace.rule: if workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule =", "structure, name, structure_weight, incompatible_weight ): if len(incompatibles): if __fight(structure, structure_weight, incompatibles, incompatible_weight): logging.info(f\"broke", "source = source.left_bond.left_object search = True destination = source search = True while", "s in workspace.structures if isinstance(s, (Group, Bond, Correspondence)) ] assert structures structure =", "codelet, strength) def bond_builder(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() assert bond.source in workspace.objects", "and (not (want_flip and target_not_flipped)) ) if correspondence.reflexive(): # if the correspondence exists,", "100.0 mapping.target_descriptor.buffer = 100.0 coderack.new_codelet(\"correspondence-builder\", codelet, strength, correspondence) def correspondence_builder(codelet): correspondence = codelet.arguments[0]", "if not formulas.select_list_position(activations): mydirection = slipnet.left else: mydirection = slipnet.right if mydirection ==", "logging.info(f\"bond strength = {strength} 
for {bond}\") assert formulas.coin_flip(probability) bond.facet.buffer = 100.0 bond.source_descriptor.buffer =", "first_bond or first_bond.category != category: if category == slipnet.sameness and isinstance(source, Letter): group", "number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds) nearly_total_length = len(workspace.initial) + len(workspace.target) - 2 bond_density", "m in concept_mappings if m.distinguishing()] assert distinguishing_mappings # if both objects span the", "> rhs}\") return weighted_strength1 > rhs def __fight(structure, structure_weight, incompatibles, incompatible_weight): if not", "1.0) workspace.build_rule(rule) def __get_cut_off(density): if density > 0.8: distribution = [5.0, 150.0, 5.0,", "\"inter_string_salience\", workspace.target.objects ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get the posible concept mappings", "in incompatibles: incompatible_spans = ( incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span() ) assert __structure_versus_structure( correspondence, correspondence_spans,", "destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def rule_scout(codelet): assert workspace.number_of_unreplaced_objects() == 0", "bond_facet or bond_facet == source.left_bond.facet: bond_facet = source.left_bond.facet direction = source.left_bond.direction_category source =", "assert source assert not source.spans_string() if source.leftmost: direction = slipnet.right elif source.rightmost: direction", "= changed.get_descriptor(slipnet.string_position_category) if position: object_list += [position] letter = changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter = [", "for mapping in concept_mappings: slippiness = mapping.slippability() / 100.0 probability_of_slippage = formulas.temperature_adjusted_probability(slippiness) if", "object_list: node = node.apply_slippages(slippages) if 
target_object.described(node): if target_object.distinguishing_descriptor(node): new_list += [node] object_list =", "string-position (left-,right-most,middle or whole) or letter category # if it is the only", "workspace.modified.objects if isinstance(o, Letter) and o.left_index == position ] letter_of_modified_string = more_letters and", "if both objects span the strings, check to see if the # string", "destination_descriptor = bond_descriptors logging.info(f\"source descriptor: {source_descriptor.name.upper()}\") logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\") category = source_descriptor.get_bond_category(destination_descriptor) assert", "of incompatible_bonds: {len(incompatible_bonds)}\") if len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds, bond, \"bonds\", 1.0, 1.0) incompatible_groups", "same direction incompatible_bonds = [] # incompatible bond list if len(group.object_list) > 1:", "in workspace.objects assert ( object_from_target in workspace.objects or correspondence.flip_target_object and not workspace.target.equivalent_group(object_from_target.flipped_version()) )", "= chosen_object.get_possible_descriptions(description_type) assert descriptions values = [n.activation for n in descriptions] i =", "def __get_scout_source(slipnode, relevance_method, type_name): initial_relevance = relevance_method(workspace.initial, slipnode) target_relevance = relevance_method(workspace.target, slipnode) initial_unhappiness", "and not first_bond.direction_category: direction = None assert first_bond assert first_bond.direction_category == direction logging.info(f\"possible", "backward_bond = slipnet.sameness else: backward_bond = destination_descriptor.get_bond_category(source_descriptor) assert category in [forward_bond, backward_bond] if", "<= probability coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength, rule) def replacement_finder(): # choose random 
letter in", "strength value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) equivalent = group.string.equivalent_group(group) if", "if len(group.object_list) > 1: previous = group.object_list[0] for objekt in group.object_list[1:]: left_bond =", "nodes group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0 if group.direction_category: group.direction_category.buffer = 100.0 coderack.new_codelet(\"group-builder\", codelet, strength) def", "def top_down_description_scout(codelet): description_type = codelet.arguments[0] chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) descriptions", "structure in break_objects: break_probability = formulas.temperature_adjusted_probability( structure.total_strength / 100.0 ) if formulas.coin_flip(break_probability): return", "selected: {workspace.initial}\") # find leftmost object & the highest group to which it", "codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"direction\" ) logging.info(f\"source chosen = {source}\") assert", "incompatible in incompatibles: incompatible_spans = ( incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span() ) assert __structure_versus_structure( correspondence,", "object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) assert concept_mappings assert __slippability(concept_mappings) # find out if any", "correspondence.object_from_initial target = correspondence.object_from_target if initial.leftmost or initial.rightmost and target.leftmost or target.rightmost: #", "assert category if category == slipnet.identity: category = slipnet.sameness logging.info(f\"proposing {category.name} bond \")", "= formulas.temperature_adjusted_probability(rule.total_strength / 100.0) assert random.random() <= 
probability coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength, rule) def", "bond_facet or bond_facet == destination.right_bond.facet: bond_facet = destination.right_bond.facet direction = source.right_bond.direction_category destination =", "if m.distinguishing()] assert distinguishing_mappings # if both objects span the strings, check to", "workspace.rule, 1.0) workspace.build_rule(rule) def __get_cut_off(density): if density > 0.8: distribution = [5.0, 150.0,", "assert object_from_initial.spans_string() == object_from_target.spans_string() # get the posible concept mappings concept_mappings = formulas.get_mappings(", "slipnode) target_relevance = relevance_method(workspace.target, slipnode) initial_unhappiness = workspace.initial.intra_string_unhappiness target_unhappiness = workspace.target.intra_string_unhappiness logging.info( f\"initial", "destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination ) category = source_descriptor.get_bond_category(destination_descriptor) assert", "not structure: return \"unstructured\" if isinstance(structure, WorkspaceObject): return \"target\" if structure.string == workspace.initial:", "target_candidates += [objekt] assert target_candidates object_from_target = choose_unmodified_object( \"inter_string_salience\", target_candidates ) assert object_from_initial.spans_string()", "object_list += [changed.replacement.relation] object_list += [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] # use conceptual depth to", "for o in group.object_list: assert o in workspace.objects # check to see if", "( incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span() ) assert __structure_versus_structure( correspondence, correspondence_spans, incompatible, incompatible_spans ) incompatible_bond", "direction) assert destination logging.info(f\"to object: {destination}\") bond_facet = 
__get_bond_facet(source, destination) source_descriptor, destination_descriptor =", "slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond\")", "all groups containing these objects incompatible_groups = group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups, group, \"Groups\", 1.0,", "type in the string object_list = [] position = changed.get_descriptor(slipnet.string_position_category) if position: object_list", "see if bonds are there of the same direction incompatible_bonds = [] #", "and len(workspace.target) == 1: bond_density = 1.0 else: number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds)", "against it assert __structure_versus_structure( correspondence, 3.0, incompatible_bond, 2.0 ) # won against incompatible", "initial_ascii = ord(workspace.initial_string[position]) modified_ascii = ord(workspace.modified_string[position]) diff = initial_ascii - modified_ascii if abs(diff)", "first_bond = source.right_bond if not first_bond or first_bond.category != category: if category ==", "if relation != slipnet.sameness: letter_of_initial_string.changed = True workspace.changed_object = letter_of_initial_string logging.info(\"building replacement\") def", "letter_of_initial_string = random.choice(letters) logging.info(f\"selected letter in initial string = {letter_of_initial_string}\") if letter_of_initial_string.replacement: logging.info(", "assert __structure_versus_structure( correspondence, 3.0, incompatible_bond, 2.0 ) # won against incompatible bond incompatible_group", "against it incompatible_rule = None if workspace.rule: if workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule = workspace.rule assert", "if incompatible_correspondences: logging.info(\"trying to break incompatible correspondences\") assert __fight(bond, 2.0, incompatible_correspondences, 3.0) for", "distribution = [5.0, 150.0, 5.0, 2.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0] elif", "and bond if they exist if incompatible_bond: incompatible_bond.break_the_structure() if incompatible_group: incompatible_group.break_the_structure() if incompatible_rule:", "else: source = object2 destination = object1 category = group.group_category.get_related_node(slipnet.bond_category) facet = group.facet", "correspondence_builder(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target want_flip = correspondence.flip_target_object", "correspondence exists, activate concept mappings # and add new ones to the existing", "(100.0 - formulas.Temperature) / 100.0 assert not formulas.coin_flip(probability_of_fizzle) # choose a structure at", "and not workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength() strength = correspondence.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0)", "descriptions[i] coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def description_strength_tester(codelet): description = codelet.arguments[0] description.descriptor.buffer", "opposites] flip_target_object = False if ( object_from_initial.spans_string() and object_from_target.spans_string() and slipnet.direction_category in initial_description_types", "not source.left_bond: continue if source.left_bond.category != category: continue if source.left_bond.direction_category != direction: if", "find leftmost object in group with these bonds while search: search = False", "found - fight against it assert __structure_versus_structure( correspondence, 3.0, incompatible_bond, 2.0 ) #", "= True assert destination != source objects = [source] bonds = [] while", "rule_strength_tester(codelet): rule = codelet.arguments[0] rule.update_strength() probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0) 
assert random.random() <=", "leftmost.right_bond.right_object objects += [leftmost] assert leftmost.rightmost # choose a random bond from list", "len(distribution) def rule_translator(): assert workspace.rule if len(workspace.initial) == 1 and len(workspace.target) == 1:", "from .bond import possible_group_bonds from .coderack import coderack from .correspondence import Correspondence from", "source, destination ) category = source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category", ") probability = group.single_letter_group_probability() assert random.random() >= probability coderack.propose_single_letter_group(source, codelet) return direction =", "source.left_bond.left_object search = True # find rightmost object in group with these bonds", "leftmost.right_bond: bonds += [leftmost.right_bond] leftmost = leftmost.right_bond.right_object objects += [leftmost] assert leftmost.rightmost #", "bond_facet = source.left_bond.facet direction = source.left_bond.direction_category source = source.left_bond.left_object search = True #", "category, facet, source.get_descriptor(facet), destination.get_descriptor(facet), ) new_bond.build_bond() group.bond_list += [object1.right_bond] for incompatible in incompatible_groups:", "= 100.0 mapping.initial_descriptor.buffer = 100.0 mapping.target_description_type.buffer = 100.0 mapping.target_descriptor.buffer = 100.0 coderack.new_codelet(\"correspondence-builder\", codelet,", "bonds, group_category, direction, bond_facet, codelet ) # noinspection PyStringFormat def group_scout__whole_string(codelet): string =", "in workspace.initial.objects if o.changed] # assert len(changed_objects) < 2 # if there are", "logging.info(\"building replacement\") def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category = codelet.arguments[0] source = __get_scout_source( category, formulas.local_bond_category_relevance,", 
"bond.source_descriptor.buffer = 100.0 bond.destination_descriptor.buffer = 100.0 logging.info(\"succeeded: posting bond-builder\") coderack.new_codelet(\"bond-builder\", codelet, strength) def", "in incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building bond {bond}\") bond.build_bond() # pylint: disable=too-many-branches # pylint: disable=too-many-statements", "weight2): structure1.update_strength() structure2.update_strength() weighted_strength1 = formulas.temperature_adjusted_value( structure1.total_strength * weight1 ) weighted_strength2 = formulas.temperature_adjusted_value(", "if not bond_facet or bond_facet == source.left_bond.facet: bond_facet = source.left_bond.facet direction = source.left_bond.direction_category", "source.leftmost: direction = slipnet.right elif source.rightmost: direction = slipnet.left else: activations = [slipnet.left.activation]", "{workspace.target} for {type_name}\") else: logging.info(f\"initial string selected: {workspace.initial} for {type_name}\") source = choose_unmodified_object(\"intra_string_salience\",", "# pylint: disable=too-many-branches # pylint: disable=too-many-statements def top_down_group_scout__category(codelet): group_category = codelet.arguments[0] category =", "__get_scout_source(slipnode, relevance_method, type_name): initial_relevance = relevance_method(workspace.initial, slipnode) target_relevance = relevance_method(workspace.target, slipnode) initial_unhappiness =", "= [structure] if isinstance(structure, Bond): if structure.source.group: if structure.source.group == structure.destination.group: break_objects +=", "direction: if mydirection == slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond if", ") incompatible_bond = None incompatible_group = None # if there is an incompatible", "to see if the # string description needs to be flipped opposites =", "group.direction_category, group.facet, codelet, ) return bonds = [] objects = [leftmost] 
while leftmost.right_bond:", "position ] letter_of_modified_string = more_letters and more_letters[0] or None assert letter_of_modified_string position -=", "return \"unstructured\" if isinstance(structure, WorkspaceObject): return \"target\" if structure.string == workspace.initial: return \"initial\"", "- modified_ascii if abs(diff) < 2: relations = {0: slipnet.sameness, -1: slipnet.successor, 1:", "None # find leftmost object in group with these bonds search = True", "== structure.destination.group: break_objects += [structure.source.group] # try to break all objects for structure", "group.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability # it is", "coderack.propose_group( objects, bonds, group_category, direction_category, bond_facet, codelet ) def group_strength_tester(codelet): # update strength", "group_scout__whole_string(codelet): string = workspace.initial if random.random() > 0.5: string = workspace.target logging.info(f\"target string", "# update strength value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength() strength", ") def top_down_bond_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"bond\" )", "if right_bond.right_object == next_object: continue if right_bond.direction_category == group.direction_category: continue incompatible_bonds += [right_bond]", "def __all_opposite_mappings(mappings): return len([m for m in mappings if m.label != slipnet.opposite]) ==", "= group_category.get_related_node(slipnet.bond_category) assert category source = __get_scout_source( category, formulas.local_bond_category_relevance, \"group\" ) assert source", "and slipnet.opposite.activation != 100.0 ): object_from_target = object_from_target.flipped_version() concept_mappings = formulas.get_mappings( object_from_initial, 
object_from_target,", "slipnet.identity: category = slipnet.sameness logging.info(f\"proposing {category.name} bond \") coderack.propose_bond( source, destination, category, bond_facet,", "- 2 bond_density = number_of_bonds / nearly_total_length if bond_density > 1.0: bond_density =", "slipnet.right elif source.rightmost: mydirection = slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation]", "# check the other side of object if direction == slipnet.right: first_bond =", "1.0, 1.0, 1.0] elif density > 0.4: distribution = [1.0, 2.0, 5.0, 150.0,", "if not (incompatibles and len(incompatibles)): return True for incompatible in incompatibles: if not", "update strength value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) equivalent = group.string.equivalent_group(group)", "group_strength_tester(codelet): # update strength value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength()", "codelet, ) def correspondence_strength_tester(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target", "assert destination logging.info(f\"destination: {destination}\") bond_facet = __get_bond_facet(source, destination) logging.info(f\"chosen bond facet: {bond_facet.get_name()}\") logging.info(f\"Source:", "the # string description needs to be flipped opposites = [ m for", "= source.right_bond if not first_bond or first_bond.category != category: if category == slipnet.sameness", "(not (want_flip and target_not_flipped)) ) if correspondence.reflexive(): # if the correspondence exists, activate", "logging.info(f\"proposing {category.name} bond \") coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, )", "if destination.right_bond.direction_category: continue if not bond_facet or bond_facet 
== destination.right_bond.facet: bond_facet = destination.right_bond.facet", ") flip_target_object = True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def important_object_correspondence_scout(codelet):", "isinstance(o, Letter)] letter_of_initial_string = random.choice(letters) logging.info(f\"selected letter in initial string = {letter_of_initial_string}\") if", "object_from_initial.spans_string() == object_from_target.spans_string() # get the posible concept mappings concept_mappings = formulas.get_mappings( object_from_initial,", "structure at random structures = [ s for s in workspace.structures if isinstance(s,", "< 2: relations = {0: slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor} relation = relations[diff]", "= 100.0 if group.direction_category: group.direction_category.buffer = 100.0 coderack.new_codelet(\"group-builder\", codelet, strength) def group_builder(codelet): #", "cutoff >= formulas.actual_temperature if workspace.rule.build_translated_rule(): workspace.found_answer = True else: temperature.clamp_time = coderack.codelets_run +", "group_category, direction, bond_facet, codelet ) # noinspection PyStringFormat def group_scout__whole_string(codelet): string = workspace.initial", "= workspace.initial if random.random() > 0.5: string = workspace.target logging.info(f\"target string selected: {workspace.target}\")", "# find leftmost object in group with these bonds search = True while", "mydirection == slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond:", "5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0] else: distribution = [1.0, 1.0, 1.0,", "for incompatible in incompatibles: if not __structure_versus_structure( structure, structure_weight, incompatible, incompatible_weight ): logging.info(f\"lost", "__get_bond_facet(source, destination): bond_facet = choose_bond_facet(source, destination) assert bond_facet return 
bond_facet def __get_descriptors(bond_facet, source,", "import choose_unmodified_object from .workspace_formulas import workspace from .workspace_object import WorkspaceObject # some methods", "= [] for i in range(1, len(group.object_list)): object1 = group.object_list[i - 1] object2", "assert category group_category = category.get_related_node(slipnet.group_category) logging.info(f\"trying from {source} to {category.name}\") bond_facet = None", "slipnode for mapping in workspace.slippages(): if mapping.initial_descriptor == slipnode: initial_descriptor = mapping.target_descriptor target_candidates", "formulas.temperature_adjusted_value( structure1.total_strength * weight1 ) weighted_strength2 = formulas.temperature_adjusted_value( structure2.total_strength * weight2 ) rhs", "union of this and the distingushing descriptors if changed.correspondence: target_object = changed.correspondence.object_from_target new_list", "2.0, 1.0, 1.0] stop = sum(distribution) * random.random() total = 0.0 for i", "right_bond.right_object == next_object: continue if right_bond.direction_category == group.direction_category: continue incompatible_bonds += [right_bond] next_object", "for {letter_of_initial_string}, so fizzling\" ) return position = letter_of_initial_string.left_index more_letters = [ o", "rule.update_strength() probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0) assert random.random() <= probability coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength,", "against other rules if workspace.rule: assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0) workspace.build_rule(rule) def __get_cut_off(density):", "1.0, 1.0, 1.0, 1.0] elif density > 0.4: distribution = [1.0, 2.0, 5.0,", "/ 100.0) assert random.random() <= probability # activate some concepts for mapping in", "objects, propose a rule with no changes if not changed_objects: return coderack.propose_rule(None, None,", "source_descriptor, 
destination_descriptor = __get_descriptors( bond_facet, source, destination ) category = source_descriptor.get_bond_category(destination_descriptor) assert category", "bonds search = True while search: search = False if not source.left_bond: continue", "if density > 0.8: distribution = [5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0,", "codelet ) def top_down_group_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"direction\"", "= source.right_bond if not first_bond or first_bond.category != category: # check the other", "objects are still there for o in group.object_list: assert o in workspace.objects #", "the relation (change the letmost object to \"successor\" or \"d\" object_list = []", "100.0 logging.info(\"succeeded: posting bond-builder\") coderack.new_codelet(\"bond-builder\", codelet, strength) def bond_builder(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond)", "search = False if not destination.right_bond: continue if destination.right_bond.category != category: continue if", "relevance_method(workspace.target, slipnode) initial_unhappiness = workspace.initial.intra_string_unhappiness target_unhappiness = workspace.target.intra_string_unhappiness logging.info( f\"initial : relevance =", "relation value_list = [] for node in object_list: depth = node.conceptual_depth value =", "__get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination ) forward_bond = source_descriptor.get_bond_category(destination_descriptor)", "logging.info(\"no relation found\") letter_of_initial_string.replacement = Replacement( letter_of_initial_string, letter_of_modified_string, relation ) if relation !=", "get the posible concept mappings concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), 
object_from_target.relevant_descriptions(), )", "__show_which_string_object_is_from(structure) break_objects = [structure] if isinstance(structure, Bond): if structure.source.group: if structure.source.group == structure.destination.group:", "\"relative_importance\", workspace.initial.objects ) descriptors = object_from_initial.relevant_distinguishing_descriptors() slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode initial_descriptor =", "+ target_unhappiness randomized = random.random() * (relevances + unhappinesses) initials = initial_relevance +", "destination_descriptor return source_descriptor, destination_descriptor def __all_opposite_mappings(mappings): return len([m for m in mappings if", "workspace.objects or correspondence.flip_target_object and not workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength() strength = correspondence.total_strength probability =", "destination ) category = source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category =", "# fight all groups containing these objects incompatible_groups = group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups, group,", "rule.total_strength, rule) def replacement_finder(): # choose random letter in initial string letters =", "fight all groups containing these objects incompatible_groups = group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups, group, \"Groups\",", "Bond, Correspondence)) ] assert structures structure = random.choice(structures) __show_which_string_object_is_from(structure) break_objects = [structure] if", "choose_neighbour from .workspace_formulas import choose_unmodified_object from .workspace_formulas import workspace from .workspace_object import WorkspaceObject", "[position] letter = changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter = [ o for o in 
workspace.initial.objects if", "direction logging.info(f\"possible group: {first_bond}\") category = first_bond.category assert category group_category = category.get_related_node(slipnet.group_category) logging.info(f\"trying", "workspace from .workspace_object import WorkspaceObject # some methods common to the codelets def", "i + 1 return len(distribution) def rule_translator(): assert workspace.rule if len(workspace.initial) == 1", "mapping in workspace.slippages(): if mapping.initial_descriptor == slipnode: initial_descriptor = mapping.target_descriptor target_candidates = []", "# check to see if all objects are still there for o in", "incompatible_correspondences = [] if bond.left_object.leftmost or bond.right_object.rightmost: if bond.direction_category: incompatible_correspondences = bond.get_incompatible_correspondences() if", "= {int(initial_unhappiness)}\" ) logging.info( f\"target : relevance = {target_relevance}, \" f\"unhappiness = {int(target_unhappiness)}\"", "incompatible {name}\") return True def __slippability(concept_mappings): for mapping in concept_mappings: slippiness = mapping.slippability()", "for i in range(1, len(group.object_list)): object1 = group.object_list[i - 1] object2 = group.object_list[i]", "+= [slipnet.right.activation] if not formulas.select_list_position(activations): direction = slipnet.left else: direction = slipnet.right if", "backward_bond] if category == forward_bond: coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet,", "else: temperature.clamp_time = coderack.codelets_run + 100 temperature.clamped = True formulas.Temperature = 100.0 def", "in correspondence.concept_mappings: mapping.initial_description_type.buffer = 100.0 mapping.initial_descriptor.buffer = 100.0 mapping.target_description_type.buffer = 100.0 mapping.target_descriptor.buffer =", "sum(distribution) * random.random() total = 0.0 for i in range(0, len(distribution)): total +=", 
"initial_unhappiness if randomized > initials: string = workspace.target logging.info(f\"target string selected: {workspace.target} for", "if equivalent: logging.info(\"already exists...activate descriptors & fizzle\") group.activate_descriptions() equivalent.add_descriptions(group.descriptions) return # check to", "- formulas.Temperature) / 100.0 assert not formulas.coin_flip(probability_of_fizzle) # choose a structure at random", "= 100.0 else: description.build() def bottom_up_bond_scout(codelet): source = choose_unmodified_object(\"intra_string_salience\", workspace.objects) __show_which_string_object_is_from(source) destination =", "+= [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] # use conceptual depth to choose a relation value_list", "1.0) incompatible_groups = bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups, bond, \"groups\", 1.0, 1.0) # fight all", "random.random() logging.info(f\"{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}\") return weighted_strength1 > rhs def __fight(structure,", "= object_list[i] coderack.propose_rule( slipnet.letter_category, descriptor, slipnet.letter, relation, codelet ) def rule_strength_tester(codelet): rule =", "= category.get_related_node(slipnet.group_category) coderack.propose_group( objects, bonds, group_category, direction_category, bond_facet, codelet ) def group_strength_tester(codelet): #", ") for incompatible in incompatibles: incompatible_spans = ( incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span() ) assert", "__get_descriptors( bond_facet, source, destination ) forward_bond = source_descriptor.get_bond_category(destination_descriptor) if forward_bond == slipnet.identity: forward_bond", "search = True bond_facet = None # find leftmost object in group with", "new_list += [node] object_list = new_list # should this be += ?? 
assert", "in incompatible_bonds: incompatible.break_the_structure() for incompatible in incompatible_groups: incompatible.break_the_structure() for incompatible in incompatible_correspondences: incompatible.break_the_structure()", "source != destination: bonds += [source.right_bond] objects += [source.right_bond.right_object] source = source.right_bond.right_object coderack.propose_group(", "[slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): mydirection = slipnet.left else: mydirection =", "if o.changed] # assert len(changed_objects) < 2 # if there are no changed", "continue incompatible_bonds += [right_bond] next_object = objekt # if incompatible bonds exist -", "target_object.described(node): if target_object.distinguishing_descriptor(node): new_list += [node] object_list = new_list # should this be", "first_bond.direction_category: direction = None if not first_bond or first_bond.direction_category != direction: if mydirection", "object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) flip_target_object = True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet,", "!= direction: if source.left_bond.direction_category: continue if not bond_facet or bond_facet == source.left_bond.facet: bond_facet", "target_unhappiness = workspace.target.intra_string_unhappiness logging.info( f\"initial : relevance = {initial_relevance}, \" f\"unhappiness = {int(initial_unhappiness)}\"", "type_name): initial_relevance = relevance_method(workspace.initial, slipnode) target_relevance = relevance_method(workspace.target, slipnode) initial_unhappiness = workspace.initial.intra_string_unhappiness target_unhappiness", "bond_facet def __get_descriptors(bond_facet, source, destination): source_descriptor = source.get_descriptor(bond_facet) destination_descriptor = 
destination.get_descriptor(bond_facet) assert source_descriptor", "search = True destination = source search = True while search: search =", "this object corresponds to another object in the workspace # object_list = the", "True assert destination != source objects = [source] bonds = [] while source", "structure.string == workspace.initial: return \"initial\" return \"other\" def __get_scout_source(slipnode, relevance_method, type_name): initial_relevance =", "left_bond = objekt.left_bond if left_bond: if left_bond.left_object == previous: continue if left_bond.direction_category ==", "150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.4: distribution", "and __all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation != 100.0 ): object_from_target = object_from_target.flipped_version() concept_mappings = formulas.get_mappings(", "if structure.source.group == structure.destination.group: break_objects += [structure.source.group] # try to break all objects", "formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) descriptor = object_list[i] # choose the", "workspace.target.intra_string_unhappiness logging.info( f\"initial : relevance = {initial_relevance}, \" f\"unhappiness = {int(initial_unhappiness)}\" ) logging.info(", "codelet, ) def bond_strength_tester(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() strength = bond.total_strength probability", "direction: if source.left_bond.direction_category: continue if not bond_facet or bond_facet == source.left_bond.facet: bond_facet =", "source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) else: coderack.propose_bond( destination, source, category,", "source.left_bond.category != category: continue if source.left_bond.direction_category != direction: if source.left_bond.direction_category: continue if not", "codelet.arguments[0] 
object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target assert object_from_initial in workspace.objects assert (", "abs(diff) < 2: relations = {0: slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor} relation =", "group = codelet.arguments[0] __show_which_string_object_is_from(group) equivalent = group.string.equivalent_group(group) if equivalent: logging.info(\"already exists...activate descriptors &", "def bond_builder(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() assert bond.source in workspace.objects or bond.destination", "objects for structure in break_objects: break_probability = formulas.temperature_adjusted_probability( structure.total_strength / 100.0 ) if", "assert not source.spans_string() if source.leftmost: mydirection = slipnet.right elif source.rightmost: mydirection = slipnet.left", "for the incompatible bond incompatible_bond = correspondence.get_incompatible_bond() if incompatible_bond: # bond found -", "category.get_related_node(slipnet.group_category) coderack.propose_group( objects, bonds, group_category, direction_category, bond_facet, codelet ) def group_strength_tester(codelet): # update", "100.0 def bottom_up_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects ) object_from_target = choose_unmodified_object( \"inter_string_salience\",", "import Letter from .replacement import Replacement from .slipnet import slipnet from .workspace_formulas import", "= choose_neighbour(source) assert destination logging.info(f\"destination: {destination}\") bond_facet = __get_bond_facet(source, destination) logging.info(f\"chosen bond facet:", "concepts for mapping in correspondence.concept_mappings: mapping.initial_description_type.buffer = 100.0 mapping.initial_descriptor.buffer = 100.0 mapping.target_description_type.buffer =", "return incompatibles = 
correspondence.get_incompatible_correspondences() # fight against all correspondences if incompatibles: correspondence_spans =", "a rule with no changes if not changed_objects: return coderack.propose_rule(None, None, None, None,", "in [forward_bond, backward_bond] if category == forward_bond: coderack.propose_bond( source, destination, category, bond_facet, source_descriptor,", "string.objects: if objekt.leftmost: leftmost = objekt while leftmost.group and leftmost.group.bond_category == slipnet.sameness: leftmost", "formulas.select_list_position(values) chosen = sliplinks[i] chosen_property = chosen.destination coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet )", "formulas.temperature_adjusted_probability(strength / 100.0) logging.info(f\"bond strength = {strength} for {bond}\") assert formulas.coin_flip(probability) bond.facet.buffer =", "stop = sum(distribution) * random.random() total = 0.0 for i in range(0, len(distribution)):", "m.label != slipnet.opposite]) == 0 def __structure_versus_structure(structure1, weight1, structure2, weight2): structure1.update_strength() structure2.update_strength() weighted_strength1", "slipnet.sameness logging.info(f\"proposing {category.name} bond \") coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet,", "top_down_group_scout__category(codelet): group_category = codelet.arguments[0] category = group_category.get_related_node(slipnet.bond_category) assert category source = __get_scout_source( category,", "return for structure in break_objects: structure.break_the_structure() def bottom_up_description_scout(codelet): chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert", "activations += [slipnet.right.activation] if not formulas.select_list_position(activations): direction = slipnet.left else: direction = slipnet.right", "structure_weight, incompatible, incompatible_weight ): 
logging.info(f\"lost fight with {incompatible}\") return False logging.info(f\"won fight with", ") def top_down_group_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"direction\" )", "group.object_list[-1] for objekt in reversed(group.object_list[:-1]): right_bond = objekt.right_bond if right_bond: if right_bond.right_object ==", "def replacement_finder(): # choose random letter in initial string letters = [o for", "coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength, rule) def replacement_finder(): # choose random letter in initial string", "codelet.arguments[0] if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return rule.update_strength() assert rule.total_strength # fight against other rules", "None if workspace.rule: if workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule = workspace.rule assert __structure_versus_structure( correspondence, 1.0, incompatible_rule,", "workspace.initial.objects if not o != changed and o.get_description_type(letter) ] if not len(other_objects_of_same_letter): object_list", "object_from_initial.spans_string() and object_from_target.spans_string() and slipnet.direction_category in initial_description_types and __all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation != 100.0", "= letter_of_initial_string.left_index more_letters = [ o for o in workspace.modified.objects if isinstance(o, Letter)", "= True # find rightmost object in group with these bonds search =", "string letters = [o for o in workspace.initial.objects if isinstance(o, Letter)] letter_of_initial_string =", "not bond_facet or bond_facet == source.left_bond.facet: bond_facet = source.left_bond.facet direction = source.left_bond.direction_category source", "if right_bond: if right_bond.right_object == next_object: continue if right_bond.direction_category == 
group.direction_category: continue incompatible_bonds", "= objekt while leftmost.group and leftmost.group.bond_category == slipnet.sameness: leftmost = leftmost.group if leftmost.spans_string():", "assert category in [forward_bond, backward_bond] if category == forward_bond: coderack.propose_bond( source, destination, category,", "coderack.propose_rule( slipnet.letter_category, descriptor, slipnet.letter, relation, codelet ) def rule_strength_tester(codelet): rule = codelet.arguments[0] rule.update_strength()", "check to see if the # string description needs to be flipped opposites", ".workspace_formulas import choose_directed_neighbor from .workspace_formulas import choose_neighbour from .workspace_formulas import choose_unmodified_object from .workspace_formulas", "if any are distinguishing distinguishing_mappings = [m for m in concept_mappings if m.distinguishing()]", "search = True # find rightmost object in group with these bonds search", "= {target_relevance}, \" f\"unhappiness = {int(target_unhappiness)}\" ) string = workspace.initial relevances = initial_relevance", "bond_facet, bonds) assert bonds group_category = category.get_related_node(slipnet.group_category) coderack.propose_group( objects, bonds, group_category, direction_category, bond_facet,", "weight1, structure2, weight2): structure1.update_strength() structure2.update_strength() weighted_strength1 = formulas.temperature_adjusted_value( structure1.total_strength * weight1 ) weighted_strength2", "group group = codelet.arguments[0] __show_which_string_object_is_from(group) equivalent = group.string.equivalent_group(group) if equivalent: logging.info(\"already exists...activate descriptors", "random.choice(structures) __show_which_string_object_is_from(structure) break_objects = [structure] if isinstance(structure, Bond): if structure.source.group: if structure.source.group ==", "= initial_ascii - modified_ascii if abs(diff) < 2: relations = {0: slipnet.sameness, -1:", "2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0] elif density > 0.6: distribution =", "if not first_bond: logging.info(\"no first_bond2\") else: logging.info(f\"first_bond2: {first_bond}\") if first_bond and not first_bond.direction_category:", "if not bond_facet or bond_facet == destination.right_bond.facet: bond_facet = destination.right_bond.facet direction = source.right_bond.direction_category", "rule_builder(codelet): rule = codelet.arguments[0] if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return rule.update_strength() assert rule.total_strength # fight", "__fight_incompatibles(incompatible_bonds, group, \"bonds\", 1.0, 1.0) # fight incompatible groups # fight all groups", "weighted_strength1 > rhs def __fight(structure, structure_weight, incompatibles, incompatible_weight): if not (incompatibles and len(incompatibles)):", "first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond2\") else:", "descriptor = object_list[i] # choose the relation (change the letmost object to \"successor\"", "rule = codelet.arguments[0] if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return rule.update_strength() assert rule.total_strength # fight against", "if correspondence.reflexive(): # if the correspondence exists, activate concept mappings # and add", "[5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif density >", "= bond.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) logging.info(f\"bond strength = {strength} for {bond}\")", "strength, correspondence) def correspondence_builder(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target", "[] while source != destination: bonds += [source.right_bond] objects += [source.right_bond.right_object] source =", "slipnet.predecessor} relation = relations[diff] logging.info(f\"Relation found: {relation.name}\") else: relation = 
None logging.info(\"no relation", "if incompatible_group: assert __structure_versus_structure( correspondence, 1.0, incompatible_group, 1.0 ) # if there is", "leftmost.rightmost # choose a random bond from list chosen_bond = random.choice(bonds) category =", "& Fizzle\") return incompatible_bonds = bond.get_incompatible_bonds() logging.info(f\"number of incompatible_bonds: {len(incompatible_bonds)}\") if len(incompatible_bonds): logging.info(str(incompatible_bonds[0]))", "= slipnet.left else: mydirection = slipnet.right if mydirection == slipnet.left: first_bond = source.left_bond", "len(workspace.initial) + len(workspace.target) - 2 bond_density = number_of_bonds / nearly_total_length if bond_density >", "__fight_incompatibles( incompatibles, structure, name, structure_weight, incompatible_weight ): if len(incompatibles): if __fight(structure, structure_weight, incompatibles,", "workspace.objects or bond.destination in workspace.objects for string_bond in bond.string.bonds: if bond.same_neighbours(string_bond) and bond.same_categories(string_bond):", "[source.right_bond] objects += [source.right_bond.right_object] source = source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet,", "Letter) and o.left_index == position ] letter_of_modified_string = more_letters and more_letters[0] or None", "= random.choice(letters) logging.info(f\"selected letter in initial string = {letter_of_initial_string}\") if letter_of_initial_string.replacement: logging.info( f\"Replacement", "\"direction\" ) logging.info(f\"source chosen = {source}\") assert not source.spans_string() if source.leftmost: mydirection =", "= correspondence.flip_target_object if want_flip: flipper = object_from_target.flipped_version() target_not_flipped = not workspace.target.equivalent_group(flipper) else: target_not_flipped", "bond.get_incompatible_bonds() logging.info(f\"number of incompatible_bonds: {len(incompatible_bonds)}\") if 
len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds, bond, \"bonds\", 1.0,", "choose_directed_neighbor from .workspace_formulas import choose_neighbour from .workspace_formulas import choose_unmodified_object from .workspace_formulas import workspace", "equivalent.add_descriptions(group.descriptions) return # check to see if all objects are still there for", "\"Groups\", 1.0, 1.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() # create new bonds group.bond_list", "bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups, bond, \"groups\", 1.0, 1.0) # fight all incompatible correspondences incompatible_correspondences", "and the distingushing descriptors if changed.correspondence: target_object = changed.correspondence.object_from_target new_list = [] slippages", "source.spans_string() if source.leftmost: mydirection = slipnet.right elif source.rightmost: mydirection = slipnet.left else: activations", "description value_list = [] for node in object_list: depth = node.conceptual_depth value =", "object_from_initial in workspace.objects assert ( object_from_target in workspace.objects or correspondence.flip_target_object and not workspace.target.equivalent_group(object_from_target.flipped_version())", "workspace.objects if description.object.described(description.descriptor): description.description_type.buffer = 100.0 description.descriptor.buffer = 100.0 else: description.build() def bottom_up_bond_scout(codelet):", "if not first_bond or first_bond.category != category: if category == slipnet.sameness and isinstance(source,", "choose the relation (change the letmost object to \"successor\" or \"d\" object_list =", "first_bond.category != category: if category == slipnet.sameness and isinstance(source, Letter): group = Group(", "Correspondence from .group import Group from .letter import Letter from .replacement import Replacement", 
"assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet, strength) def description_builder(codelet): description = codelet.arguments[0] assert description.object in", "initial.leftmost or initial.rightmost and target.leftmost or target.rightmost: # search for the incompatible bond", "assert __fight_incompatibles(incompatible_groups, group, \"Groups\", 1.0, 1.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() # create", "1.0, 1.0] elif density > 0.2: distribution = [1.0, 1.0, 2.0, 5.0, 150.0,", "bonds = [] objects = [leftmost] while leftmost.right_bond: bonds += [leftmost.right_bond] leftmost =", "1.0, 1.0] else: distribution = [1.0, 1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0,", "[] for i in range(1, len(group.object_list)): object1 = group.object_list[i - 1] object2 =", "incompatible_bonds = [] # incompatible bond list if len(group.object_list) > 1: previous =", "i = formulas.select_list_position(values) chosen = sliplinks[i] chosen_property = chosen.destination coderack.propose_description( chosen_object, chosen_property.category(), chosen_property,", "random.choice(letters) logging.info(f\"selected letter in initial string = {letter_of_initial_string}\") if letter_of_initial_string.replacement: logging.info( f\"Replacement already", "None assert letter_of_modified_string position -= 1 initial_ascii = ord(workspace.initial_string[position]) modified_ascii = ord(workspace.modified_string[position]) diff", "logging.info(f\"number of incompatible_bonds: {len(incompatible_bonds)}\") if len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds, bond, \"bonds\", 1.0, 1.0)", "bond.same_categories(string_bond): if bond.direction_category: bond.direction_category.buffer = 100.0 bond.category.buffer = 100.0 logging.info(\"already exists: activate descriptors", "assert target_candidates object_from_target = choose_unmodified_object( \"inter_string_salience\", 
target_candidates ) assert object_from_initial.spans_string() == object_from_target.spans_string() #", "assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0) workspace.build_rule(rule) def __get_cut_off(density): if density > 0.8: distribution", "workspace.build_rule(rule) def __get_cut_off(density): if density > 0.8: distribution = [5.0, 150.0, 5.0, 2.0,", "workspace.rule: assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0) workspace.build_rule(rule) def __get_cut_off(density): if density > 0.8:", "# if both objects span the strings, check to see if the #", "logging.info(\"building group\") def rule_builder(codelet): rule = codelet.arguments[0] if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return rule.update_strength() assert", "fizzle\") group.activate_descriptions() equivalent.add_descriptions(group.descriptions) return # check to see if all objects are still", "= descriptions[i] coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def description_strength_tester(codelet): description = codelet.arguments[0]", "None, None, codelet) changed = changed_objects[-1] # generate a list of distinguishing descriptions", "1.0, 1.0] elif density > 0.4: distribution = [1.0, 2.0, 5.0, 150.0, 5.0,", "which it belongs leftmost = None for objekt in string.objects: if objekt.leftmost: leftmost", "if this object corresponds to another object in the workspace # object_list =", "no changes if not changed_objects: return coderack.propose_rule(None, None, None, None, codelet) changed =", "return \"initial\" return \"other\" def __get_scout_source(slipnode, relevance_method, type_name): initial_relevance = relevance_method(workspace.initial, slipnode) target_relevance", "chosen_property, codelet ) def top_down_description_scout(codelet): description_type = codelet.arguments[0] chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert", "to {category.name}\") 
bond_facet = None # find leftmost object in group with these", "5.0, 2.0, 1.0, 1.0, 1.0] else: distribution = [1.0, 1.0, 1.0, 2.0, 5.0,", "* random.random() total = 0.0 for i in range(0, len(distribution)): total += distribution[i]", "source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category = slipnet.sameness coderack.propose_bond( source, destination,", "target_candidates = [] for objekt in workspace.target.objects: for description in objekt.relevant_descriptions(): if description.descriptor", "mapping in correspondence.concept_mappings: if mapping.label: mapping.label.buffer = 100.0 if not mapping.is_contained_by(existing.concept_mappings): existing.concept_mappings +=", "category == slipnet.identity: category = slipnet.sameness coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor,", "list if len(group.object_list) > 1: previous = group.object_list[0] for objekt in group.object_list[1:]: left_bond", "objects, bonds, group_category, direction_category, bond_facet, codelet ) def group_strength_tester(codelet): # update strength value", "logging.info(f\"broke the {name}\") return True logging.info(f\"failed to break {name}: Fizzle\") return False logging.info(f\"no", "or bond_facet == source.left_bond.facet: bond_facet = source.left_bond.facet direction = source.left_bond.direction_category source = source.left_bond.left_object", "bond_facet return bond_facet def __get_descriptors(bond_facet, source, destination): source_descriptor = source.get_descriptor(bond_facet) destination_descriptor = destination.get_descriptor(bond_facet)", "if __fight(structure, structure_weight, incompatibles, incompatible_weight): logging.info(f\"broke the {name}\") return True logging.info(f\"failed to break", "# update strength value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) equivalent =", "target_not_flipped)) ) if 
correspondence.reflexive(): # if the correspondence exists, activate concept mappings #", "initial = correspondence.object_from_initial target = correspondence.object_from_target if initial.leftmost or initial.rightmost and target.leftmost or", "of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) equivalent = group.string.equivalent_group(group) if equivalent: logging.info(\"already", "destination: {destination}\") bond_descriptors = __get_descriptors(bond_facet, source, destination) source_descriptor, destination_descriptor = bond_descriptors logging.info(f\"source descriptor:", "logging.info(f\"target string selected: {workspace.target}\") else: logging.info(f\"initial string selected: {workspace.initial}\") # find leftmost object", "strength = {strength} for {bond}\") assert formulas.coin_flip(probability) bond.facet.buffer = 100.0 bond.source_descriptor.buffer = 100.0", "bond_facet = None # find leftmost object in group with these bonds search", "{strength} for {bond}\") assert formulas.coin_flip(probability) bond.facet.buffer = 100.0 bond.source_descriptor.buffer = 100.0 bond.destination_descriptor.buffer =", "[m for m in concept_mappings if m.distinguishing()] assert distinguishing_mappings # if both objects", "assert __fight_incompatibles(incompatible_bonds, bond, \"bonds\", 1.0, 1.0) incompatible_groups = bond.source.get_common_groups(bond.destination) assert __fight_incompatibles(incompatible_groups, bond, \"groups\",", "import choose_neighbour from .workspace_formulas import choose_unmodified_object from .workspace_formulas import workspace from .workspace_object import", "True workspace.changed_object = letter_of_initial_string logging.info(\"building replacement\") def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category = codelet.arguments[0] source", "source_descriptor, destination_descriptor = bond_descriptors logging.info(f\"source descriptor: 
{source_descriptor.name.upper()}\") logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\") category = source_descriptor.get_bond_category(destination_descriptor)", ">= stop: return i + 1 return len(distribution) def rule_translator(): assert workspace.rule if", "= [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): direction = slipnet.left else: direction", "# fight all incompatible correspondences incompatible_correspondences = [] if bond.left_object.leftmost or bond.right_object.rightmost: if", "{source_descriptor.name.upper()}\") logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\") category = source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity:", "!= destination: bonds += [source.right_bond] objects += [source.right_bond.right_object] source = source.right_bond.right_object coderack.propose_group( objects,", "name, structure_weight, incompatible_weight ): if len(incompatibles): if __fight(structure, structure_weight, incompatibles, incompatible_weight): logging.info(f\"broke the", "for m in concept_mappings if m.distinguishing()] assert distinguishing_mappings # if both objects span", "source_descriptor, destination_descriptor, codelet, ) def rule_scout(codelet): assert workspace.number_of_unreplaced_objects() == 0 changed_objects = [o", "if position: object_list += [position] letter = changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter = [ o for", "= 100.0 coderack.new_codelet(\"group-builder\", codelet, strength) def group_builder(codelet): # update strength value of the", "if not destination.right_bond: continue if destination.right_bond.category != category: continue if destination.right_bond.direction_category != direction:", "return weighted_strength1 > rhs def __fight(structure, structure_weight, incompatibles, incompatible_weight): if not (incompatibles 
and", "or target.rightmost: # search for the incompatible bond incompatible_bond = correspondence.get_incompatible_bond() if incompatible_bond:", "first_bond\") else: logging.info(f\"first_bond: {first_bond}\") if first_bond and not first_bond.direction_category: direction = None if", "__structure_versus_structure(rule, 1.0, workspace.rule, 1.0) workspace.build_rule(rule) def __get_cut_off(density): if density > 0.8: distribution =", "in opposites] flip_target_object = False if ( object_from_initial.spans_string() and object_from_target.spans_string() and slipnet.direction_category in", "not formulas.coin_flip(probability_of_fizzle) # choose a structure at random structures = [ s for", "changes if not changed_objects: return coderack.propose_rule(None, None, None, None, codelet) changed = changed_objects[-1]", "if source.leftmost: mydirection = slipnet.right elif source.rightmost: mydirection = slipnet.left else: activations =", "group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0 if group.direction_category: group.direction_category.buffer = 100.0 coderack.new_codelet(\"group-builder\", codelet, strength) def group_builder(codelet):", "# if there is an incompatible rule, fight against it incompatible_rule = None", "source_descriptor, destination_descriptor, codelet, ) def bond_strength_tester(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() strength =", ">= formulas.actual_temperature if workspace.rule.build_translated_rule(): workspace.found_answer = True else: temperature.clamp_time = coderack.codelets_run + 100", "# fight against all correspondences if incompatibles: correspondence_spans = ( correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span()", "= ord(workspace.modified_string[position]) diff = initial_ascii - modified_ascii if abs(diff) < 2: relations =", "# try to break all objects for structure in break_objects: break_probability = 
formulas.temperature_adjusted_probability(", "slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): direction =", "random.random() > 0.5: string = workspace.target logging.info(f\"target string selected: {workspace.target}\") else: logging.info(f\"initial string", "incompatibles, incompatible_weight): if not (incompatibles and len(incompatibles)): return True for incompatible in incompatibles:", "some methods common to the codelets def __show_which_string_object_is_from(structure): if not structure: return \"unstructured\"", "[] position = changed.get_descriptor(slipnet.string_position_category) if position: object_list += [position] letter = changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter", "= (100.0 - formulas.Temperature) / 100.0 assert not formulas.coin_flip(probability_of_fizzle) # choose a structure", "__show_which_string_object_is_from(bond) bond.update_strength() strength = bond.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) logging.info(f\"bond strength =", "more_letters and more_letters[0] or None assert letter_of_modified_string position -= 1 initial_ascii = ord(workspace.initial_string[position])", "formulas.similar_property_links(description.descriptor) assert sliplinks values = [ sliplink.degree_of_association() * sliplink.destination.activation for sliplink in sliplinks", "in initial string = {letter_of_initial_string}\") if letter_of_initial_string.replacement: logging.info( f\"Replacement already found for {letter_of_initial_string},", "rule) def replacement_finder(): # choose random letter in initial string letters = [o", "range(1, len(group.object_list)): object1 = group.object_list[i - 1] object2 = group.object_list[i] if not object1.right_bond:", "{type_name}\") else: logging.info(f\"initial string selected: {workspace.initial} for {type_name}\") source = 
choose_unmodified_object(\"intra_string_salience\", string.objects) return", "= codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target want_flip = correspondence.flip_target_object if want_flip:", "in reversed(group.object_list[:-1]): right_bond = objekt.right_bond if right_bond: if right_bond.right_object == next_object: continue if", "len(changed_objects) < 2 # if there are no changed objects, propose a rule", "bond.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) logging.info(f\"bond strength = {strength} for {bond}\") assert", "incompatibles = correspondence.get_incompatible_correspondences() # fight against all correspondences if incompatibles: correspondence_spans = (", "formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet, strength) def description_builder(codelet): description = codelet.arguments[0] assert description.object in workspace.objects", "{destination}\") objects = [source] bonds = [] while source != destination: bonds +=", "workspace.target logging.info(f\"target string selected: {workspace.target} for {type_name}\") else: logging.info(f\"initial string selected: {workspace.initial} for", "(incompatibles and len(incompatibles)): return True for incompatible in incompatibles: if not __structure_versus_structure( structure,", "= 100.0 description.descriptor.buffer = 100.0 else: description.build() def bottom_up_bond_scout(codelet): source = choose_unmodified_object(\"intra_string_salience\", workspace.objects)", "distribution = [1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0] else:", "in workspace.objects if description.object.described(description.descriptor): description.description_type.buffer = 100.0 description.descriptor.buffer = 100.0 else: description.build() def", "the existing corr. 
existing = correspondence.object_from_initial.correspondence for mapping in correspondence.concept_mappings: if mapping.label: mapping.label.buffer", "m.initial_description_type != slipnet.bond_facet ] initial_description_types = [m.initial_description_type for m in opposites] flip_target_object =", "): if len(incompatibles): if __fight(structure, structure_weight, incompatibles, incompatible_weight): logging.info(f\"broke the {name}\") return True", "initial.rightmost and target.leftmost or target.rightmost: # search for the incompatible bond incompatible_bond =", "distinguishing_mappings # if both objects span the strings, check to see if the", "len(workspace.initial.bonds) + len(workspace.target.bonds) nearly_total_length = len(workspace.initial) + len(workspace.target) - 2 bond_density = number_of_bonds", "# incompatible bond list if len(group.object_list) > 1: previous = group.object_list[0] for objekt", "strength = bond.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) logging.info(f\"bond strength = {strength} for", "other rules if workspace.rule: assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0) workspace.build_rule(rule) def __get_cut_off(density): if", "= new_list # should this be += ?? assert object_list # use conceptual", "first_bond.direction_category != direction: if mydirection == slipnet.right: first_bond = source.left_bond else: first_bond =", "object_list = the union of this and the distingushing descriptors if changed.correspondence: target_object", "distinguishing descriptions for the first object # ie. 
string-position (left-,right-most,middle or whole) or", "= codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target assert object_from_initial in workspace.objects assert", "100.0) assert random.random() <= probability coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength, rule) def replacement_finder(): # choose", "objects incompatible_groups = group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups, group, \"Groups\", 1.0, 1.0) for incompatible in", "formulas.local_direction_category_relevance, \"bond\" ) destination = choose_directed_neighbor(source, direction) assert destination logging.info(f\"to object: {destination}\") bond_facet", "== previous: continue if left_bond.direction_category == group.direction_category: continue incompatible_bonds += [left_bond] previous =", "in group.object_list: assert o in workspace.objects # check to see if bonds are", "# find out if any are distinguishing distinguishing_mappings = [m for m in", "s for s in workspace.structures if isinstance(s, (Group, Bond, Correspondence)) ] assert structures", "coderack.propose_group( group.object_list, group.bond_list, group.group_category, group.direction_category, group.facet, codelet, ) return bonds = [] objects", "if left_bond.left_object == previous: continue if left_bond.direction_category == group.direction_category: continue incompatible_bonds += [left_bond]", "description.build() def bottom_up_bond_scout(codelet): source = choose_unmodified_object(\"intra_string_salience\", workspace.objects) __show_which_string_object_is_from(source) destination = choose_neighbour(source) assert destination", "- post builder & activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0 if group.direction_category: group.direction_category.buffer =", "source = choose_unmodified_object(\"intra_string_salience\", workspace.objects) 
__show_which_string_object_is_from(source) destination = choose_neighbour(source) assert destination logging.info(f\"destination: {destination}\") bond_facet", "position -= 1 initial_ascii = ord(workspace.initial_string[position]) modified_ascii = ord(workspace.modified_string[position]) diff = initial_ascii -", "workspace.target logging.info(f\"target string selected: {workspace.target}\") else: logging.info(f\"initial string selected: {workspace.initial}\") # find leftmost", "the codelets def __show_which_string_object_is_from(structure): if not structure: return \"unstructured\" if isinstance(structure, WorkspaceObject): return", "new bonds group.bond_list = [] for i in range(1, len(group.object_list)): object1 = group.object_list[i", "in range(0, len(distribution)): total += distribution[i] if total >= stop: return i +", "object if direction == slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond if", "and m.initial_description_type != slipnet.bond_facet ] initial_description_types = [m.initial_description_type for m in opposites] flip_target_object", "workspace.changed_object = letter_of_initial_string logging.info(\"building replacement\") def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category = codelet.arguments[0] source =", "= initial_relevance + target_relevance unhappinesses = initial_unhappiness + target_unhappiness randomized = random.random() *", "choose_unmodified_object(\"intra_string_salience\", string.objects) return source def __get_bond_facet(source, destination): bond_facet = choose_bond_facet(source, destination) assert bond_facet", "+= [object1.right_bond] for incompatible in incompatible_groups: incompatible.break_the_structure() group.build_group() group.activate_descriptions() logging.info(\"building group\") def rule_builder(codelet):", "= objekt.right_bond if right_bond: if right_bond.right_object == next_object: continue if right_bond.direction_category == 
group.direction_category:", "choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) descriptions = chosen_object.get_possible_descriptions(description_type) assert descriptions values = [n.activation", "conceptual depth to choose a description value_list = [] for node in object_list:", "codelet.arguments[0] source = __get_scout_source( category, formulas.local_bond_category_relevance, \"bond\" ) destination = choose_neighbour(source) logging.info(f\"source: {source},", "100.0 description.descriptor.buffer = 100.0 else: description.build() def bottom_up_bond_scout(codelet): source = choose_unmodified_object(\"intra_string_salience\", workspace.objects) __show_which_string_object_is_from(source)", "else: first_bond = source.right_bond if not first_bond or first_bond.category != category: # check", "= mapping.slippability() / 100.0 probability_of_slippage = formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage): return True return False", "choose_unmodified_object( \"relative_importance\", workspace.initial.objects ) descriptors = object_from_initial.relevant_distinguishing_descriptors() slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode initial_descriptor", "slipnode: initial_descriptor = mapping.target_descriptor target_candidates = [] for objekt in workspace.target.objects: for description", ") def description_strength_tester(codelet): description = codelet.arguments[0] description.descriptor.buffer = 100.0 description.update_strength() strength = description.total_strength", "group_category = category.get_related_node(slipnet.group_category) logging.info(f\"trying from {source} to {category.name}\") bond_facet = None # find", "destination_descriptor = destination.get_descriptor(bond_facet) assert source_descriptor assert destination_descriptor return source_descriptor, destination_descriptor def 
__all_opposite_mappings(mappings): return", "assert ( object_from_target in workspace.objects or correspondence.flip_target_object and not workspace.target.equivalent_group(object_from_target.flipped_version()) ) correspondence.update_strength() strength", "random structures = [ s for s in workspace.structures if isinstance(s, (Group, Bond,", "= codelet.arguments[0] __show_which_string_object_is_from(group) equivalent = group.string.equivalent_group(group) if equivalent: logging.info(\"already exists...activate descriptors & fizzle\")", "facet = group.facet new_bond = Bond( source, destination, category, facet, source.get_descriptor(facet), destination.get_descriptor(facet), )", "== slipnet.sameness and isinstance(source, Letter): group = Group( source.string, slipnet.sameness_group, None, slipnet.letter_category, [source],", "correspondence.concept_mappings: if mapping.label: mapping.label.buffer = 100.0 if not mapping.is_contained_by(existing.concept_mappings): existing.concept_mappings += [mapping] return", "logging.info( f\"target : relevance = {target_relevance}, \" f\"unhappiness = {int(target_unhappiness)}\" ) string =", "f\"Replacement already found for {letter_of_initial_string}, so fizzling\" ) return position = letter_of_initial_string.left_index more_letters", "target_relevance unhappinesses = initial_unhappiness + target_unhappiness randomized = random.random() * (relevances + unhappinesses)", "top_down_bond_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"bond\" ) destination =", "= source.right_bond.direction_category destination = destination.right_bond.right_object search = True assert destination != source objects", "in break_objects: break_probability = formulas.temperature_adjusted_probability( structure.total_strength / 100.0 ) if formulas.coin_flip(break_probability): return for", "continue if right_bond.direction_category == 
group.direction_category: continue incompatible_bonds += [right_bond] next_object = objekt #", "object2 else: source = object2 destination = object1 category = group.group_category.get_related_node(slipnet.bond_category) facet =", "relation found\") letter_of_initial_string.replacement = Replacement( letter_of_initial_string, letter_of_modified_string, relation ) if relation != slipnet.sameness:", "incompatible_group = None # if there is an incompatible bond then fight against", "/ 100.0) assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet, strength) def description_builder(codelet): description = codelet.arguments[0] assert", "+= [source.right_bond] objects += [source.right_bond.right_object] source = source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction,", "search: search = False if not source.left_bond: continue if source.left_bond.category != category: continue", "if description.descriptor == initial_descriptor: target_candidates += [objekt] assert target_candidates object_from_target = choose_unmodified_object( \"inter_string_salience\",", "__get_scout_source( direction, formulas.local_direction_category_relevance, \"direction\" ) logging.info(f\"source chosen = {source}\") assert not source.spans_string() if", "len(distribution)): total += distribution[i] if total >= stop: return i + 1 return", "description_type = codelet.arguments[0] chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) descriptions = chosen_object.get_possible_descriptions(description_type)", "see if all objects are still there for o in group.object_list: assert o", "target_candidates object_from_target = choose_unmodified_object( \"inter_string_salience\", target_candidates ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get", "found: {relation.name}\") else: relation = None 
logging.info(\"no relation found\") letter_of_initial_string.replacement = Replacement( letter_of_initial_string,", "in objekt.relevant_descriptions(): if description.descriptor == initial_descriptor: target_candidates += [objekt] assert target_candidates object_from_target =", "objekt.relevant_descriptions(): if description.descriptor == initial_descriptor: target_candidates += [objekt] assert target_candidates object_from_target = choose_unmodified_object(", "isinstance(structure, Bond): if structure.source.group: if structure.source.group == structure.destination.group: break_objects += [structure.source.group] # try", "{source}, destination: {destination}\") assert destination bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors(", "search = True destination = source while search: search = False if not", "# if this object corresponds to another object in the workspace # object_list", "= codelet.arguments[0] assert description.object in workspace.objects if description.object.described(description.descriptor): description.description_type.buffer = 100.0 description.descriptor.buffer =", "True while search: search = False if not destination.right_bond: continue if destination.right_bond.category !=", "corr. 
existing = correspondence.object_from_initial.correspondence for mapping in correspondence.concept_mappings: if mapping.label: mapping.label.buffer = 100.0", "if there is an incompatible rule, fight against it incompatible_rule = None if", "first_bond.direction_category: direction = None assert first_bond assert first_bond.direction_category == direction logging.info(f\"possible group: {first_bond}\")", "incompatible_bonds: {len(incompatible_bonds)}\") if len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds, bond, \"bonds\", 1.0, 1.0) incompatible_groups =", "coderack from .correspondence import Correspondence from .group import Group from .letter import Letter", "object_list += [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] # use conceptual depth to choose a relation", "slipnet.letter, relation, codelet ) def rule_strength_tester(codelet): rule = codelet.arguments[0] rule.update_strength() probability = formulas.temperature_adjusted_probability(rule.total_strength", "fight incompatible groups # fight all groups containing these objects incompatible_groups = group.get_incompatible_groups()", "nearly_total_length = len(workspace.initial) + len(workspace.target) - 2 bond_density = number_of_bonds / nearly_total_length if", "and more_letters[0] or None assert letter_of_modified_string position -= 1 initial_ascii = ord(workspace.initial_string[position]) modified_ascii", "== slipnet.string_position_category and m.initial_description_type != slipnet.bond_facet ] initial_description_types = [m.initial_description_type for m in", "= object_from_target.flipped_version() concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) flip_target_object = True", "rhs def __fight(structure, structure_weight, incompatibles, incompatible_weight): if not (incompatibles and 
len(incompatibles)): return True", "activate concept mappings # and add new ones to the existing corr. existing", "slipnode = formulas.choose_slipnode_by_conceptual_depth(descriptors) assert slipnode initial_descriptor = slipnode for mapping in workspace.slippages(): if", "= [ o for o in workspace.modified.objects if isinstance(o, Letter) and o.left_index ==", "group.object_list[0] for objekt in group.object_list[1:]: left_bond = objekt.left_bond if left_bond: if left_bond.left_object ==", "= source.left_bond else: first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond2\") else: logging.info(f\"first_bond2:", "= group.object_list[0] for objekt in group.object_list[1:]: left_bond = objekt.left_bond if left_bond: if left_bond.left_object", "Letter)] letter_of_initial_string = random.choice(letters) logging.info(f\"selected letter in initial string = {letter_of_initial_string}\") if letter_of_initial_string.replacement:", "a random bond from list chosen_bond = random.choice(bonds) category = chosen_bond.category direction_category =", "import temperature from .bond import Bond from .bond import possible_group_bonds from .coderack import", "\"other\" def __get_scout_source(slipnode, relevance_method, type_name): initial_relevance = relevance_method(workspace.initial, slipnode) target_relevance = relevance_method(workspace.target, slipnode)", "values = [n.activation for n in descriptions] i = formulas.select_list_position(values) chosen_property = descriptions[i]", "None # if there is an incompatible bond then fight against it initial", "== workspace.initial: return \"initial\" return \"other\" def __get_scout_source(slipnode, relevance_method, type_name): initial_relevance = relevance_method(workspace.initial,", "1 return len(distribution) def rule_translator(): assert workspace.rule if len(workspace.initial) == 1 and len(workspace.target)", "# get the posible concept mappings concept_mappings = formulas.get_mappings( object_from_initial, 
object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(),", "propose a rule with no changes if not changed_objects: return coderack.propose_rule(None, None, None,", "initial_unhappiness + target_unhappiness randomized = random.random() * (relevances + unhappinesses) initials = initial_relevance", "__fight(structure, structure_weight, incompatibles, incompatible_weight): logging.info(f\"broke the {name}\") return True logging.info(f\"failed to break {name}:", "object_from_target in workspace.objects assert initial_in_objects or ( not target_in_objects and (not (want_flip and", "destination bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination )", "source, destination) source_descriptor, destination_descriptor = bond_descriptors logging.info(f\"source descriptor: {source_descriptor.name.upper()}\") logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\") category", "= source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet ) # noinspection PyStringFormat", "category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def bond_strength_tester(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength()", "= False if ( object_from_initial.spans_string() and object_from_target.spans_string() and slipnet.direction_category in initial_description_types and __all_opposite_mappings(formulas.opposite_mappings)", "coderack.new_codelet(\"correspondence-builder\", codelet, strength, correspondence) def correspondence_builder(codelet): correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target", "object_from_target.flipped_version() target_not_flipped = not workspace.target.equivalent_group(flipper) else: target_not_flipped = False 
initial_in_objects = object_from_initial in", "the string object_list = [] position = changed.get_descriptor(slipnet.string_position_category) if position: object_list += [position]", "objekt in string.objects: if objekt.leftmost: leftmost = objekt while leftmost.group and leftmost.group.bond_category ==", "actual codelets def breaker(): probability_of_fizzle = (100.0 - formulas.Temperature) / 100.0 assert not", "& activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0 if group.direction_category: group.direction_category.buffer = 100.0 coderack.new_codelet(\"group-builder\", codelet,", "bond.update_strength() strength = bond.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) logging.info(f\"bond strength = {strength}", "weight2 ) rhs = (weighted_strength1 + weighted_strength2) * random.random() logging.info(f\"{weighted_strength1} > {rhs}: {weighted_strength1", "# find leftmost object in group with these bonds while search: search =", "slipnet.letter_category, [source], [], ) probability = group.single_letter_group_probability() assert random.random() >= probability coderack.propose_single_letter_group(source, codelet)", "in object_list: node = node.apply_slippages(slippages) if target_object.described(node): if target_object.distinguishing_descriptor(node): new_list += [node] object_list", "objekt in workspace.target.objects: for description in objekt.relevant_descriptions(): if description.descriptor == initial_descriptor: target_candidates +=", "> 0.2: distribution = [1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0,", "descriptions = chosen_object.get_possible_descriptions(description_type) assert descriptions values = [n.activation for n in descriptions] i", "category = group_category.get_related_node(slipnet.bond_category) assert category source = __get_scout_source( category, formulas.local_bond_category_relevance, \"group\" ) assert", "with these bonds search = True while search: search 
= False if not", "ie. string-position (left-,right-most,middle or whole) or letter category # if it is the", "{letter_of_initial_string}\") if letter_of_initial_string.replacement: logging.info( f\"Replacement already found for {letter_of_initial_string}, so fizzling\" ) return", "= group.group_category.get_related_node(slipnet.bond_category) facet = group.facet new_bond = Bond( source, destination, category, facet, source.get_descriptor(facet),", "formulas.select_list_position(value_list) relation = object_list[i] coderack.propose_rule( slipnet.letter_category, descriptor, slipnet.letter, relation, codelet ) def rule_strength_tester(codelet):", "# string description needs to be flipped opposites = [ m for m", "2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.2:", "PyStringFormat def group_scout__whole_string(codelet): string = workspace.initial if random.random() > 0.5: string = workspace.target", "fight group.update_strength() assert __fight_incompatibles(incompatible_bonds, group, \"bonds\", 1.0, 1.0) # fight incompatible groups #", "codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength() strength = group.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random()", "= [2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif density", "if source.left_bond.direction_category != direction: if source.left_bond.direction_category: continue if not bond_facet or bond_facet ==", "of the same direction incompatible_bonds = [] # incompatible bond list if len(group.object_list)", "if changed.correspondence: target_object = changed.correspondence.object_from_target new_list = [] slippages = workspace.slippages() for node", "import choose_bond_facet from .workspace_formulas import choose_directed_neighbor from .workspace_formulas import choose_neighbour from .workspace_formulas import", ") def group_strength_tester(codelet): # update strength value of the group group = 
codelet.arguments[0]", "changed_objects: return coderack.propose_rule(None, None, None, None, codelet) changed = changed_objects[-1] # generate a", "to the existing corr. existing = correspondence.object_from_initial.correspondence for mapping in correspondence.concept_mappings: if mapping.label:", "incompatible_correspondences = bond.get_incompatible_correspondences() if incompatible_correspondences: logging.info(\"trying to break incompatible correspondences\") assert __fight(bond, 2.0,", "codelet, ) else: coderack.propose_bond( destination, source, category, bond_facet, destination_descriptor, source_descriptor, codelet, ) def", "choose a structure at random structures = [ s for s in workspace.structures", "bond.build_bond() # pylint: disable=too-many-branches # pylint: disable=too-many-statements def top_down_group_scout__category(codelet): group_category = codelet.arguments[0] category", "{weighted_strength1 > rhs}\") return weighted_strength1 > rhs def __fight(structure, structure_weight, incompatibles, incompatible_weight): if", "group to which it belongs leftmost = None for objekt in string.objects: if", "!= slipnet.sameness: letter_of_initial_string.changed = True workspace.changed_object = letter_of_initial_string logging.info(\"building replacement\") def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\")", "leftmost object in group with these bonds while search: search = False if", "category = first_bond.category assert category group_category = category.get_related_node(slipnet.group_category) logging.info(f\"trying from {source} to {category.name}\")", "codelet) return direction = first_bond.direction_category search = True bond_facet = None # find", "all objects are still there for o in group.object_list: assert o in workspace.objects", "logging.info(f\"Relation found: {relation.name}\") else: relation = None logging.info(\"no relation found\") letter_of_initial_string.replacement = Replacement(", "if direction == 
slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond if not", "== 1 and len(workspace.target) == 1: bond_density = 1.0 else: number_of_bonds = len(workspace.initial.bonds)", ") flip_target_object = True coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def correspondence_strength_tester(codelet):", "= choose_neighbour(source) logging.info(f\"source: {source}, destination: {destination}\") assert destination bond_facet = __get_bond_facet(source, destination) source_descriptor,", "bond_facet = chosen_bond.facet bonds = possible_group_bonds(category, direction_category, bond_facet, bonds) assert bonds group_category =", "relevance = {initial_relevance}, \" f\"unhappiness = {int(initial_unhappiness)}\" ) logging.info( f\"target : relevance =", "category.get_related_node(slipnet.group_category) logging.info(f\"trying from {source} to {category.name}\") bond_facet = None # find leftmost object", "formulas.Temperature) / 100.0 assert not formulas.coin_flip(probability_of_fizzle) # choose a structure at random structures", "= ord(workspace.initial_string[position]) modified_ascii = ord(workspace.modified_string[position]) diff = initial_ascii - modified_ascii if abs(diff) <", "slipnet.string_position_category and m.initial_description_type != slipnet.bond_facet ] initial_description_types = [m.initial_description_type for m in opposites]", "relevance_method(workspace.initial, slipnode) target_relevance = relevance_method(workspace.target, slipnode) initial_unhappiness = workspace.initial.intra_string_unhappiness target_unhappiness = workspace.target.intra_string_unhappiness logging.info(", "the same direction incompatible_bonds = [] # incompatible bond list if len(group.object_list) >", "spans the string - propose this object group = leftmost coderack.propose_group( group.object_list, group.bond_list,", "= 100.0 bond.source_descriptor.buffer = 100.0 
bond.destination_descriptor.buffer = 100.0 logging.info(\"succeeded: posting bond-builder\") coderack.new_codelet(\"bond-builder\", codelet,", "= 100.0 bond.destination_descriptor.buffer = 100.0 logging.info(\"succeeded: posting bond-builder\") coderack.new_codelet(\"bond-builder\", codelet, strength) def bond_builder(codelet):", "slipnet.sameness backward_bond = slipnet.sameness else: backward_bond = destination_descriptor.get_bond_category(source_descriptor) assert category in [forward_bond, backward_bond]", "+ correspondence.object_from_target.letter_span() ) for incompatible in incompatibles: incompatible_spans = ( incompatible.object_from_initial.letter_span() + incompatible.object_from_target.letter_span()", "depth = node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) relation", "3.0, incompatible_bond, 2.0 ) # won against incompatible bond incompatible_group = target.group if", "[] for node in object_list: depth = node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list +=", ") logging.info(f\"source chosen = {source}\") assert not source.spans_string() if source.leftmost: mydirection = slipnet.right", "destination logging.info(f\"to object: {destination}\") bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet,", "strength value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength() strength = group.total_strength", "o in group.object_list: assert o in workspace.objects # check to see if bonds", "def __get_bond_facet(source, destination): bond_facet = choose_bond_facet(source, destination) assert bond_facet return bond_facet def __get_descriptors(bond_facet,", "structure2, weight2): structure1.update_strength() structure2.update_strength() weighted_strength1 = formulas.temperature_adjusted_value( structure1.total_strength 
* weight1 ) weighted_strength2 =", "__get_descriptors( bond_facet, source, destination ) category = source_descriptor.get_bond_category(destination_descriptor) assert category if category ==", ") assert source assert not source.spans_string() if source.leftmost: direction = slipnet.right elif source.rightmost:", "direction = source.right_bond.direction_category destination = destination.right_bond.right_object search = True assert destination != source", "target_unhappiness randomized = random.random() * (relevances + unhappinesses) initials = initial_relevance + initial_unhappiness", "strength) def description_builder(codelet): description = codelet.arguments[0] assert description.object in workspace.objects if description.object.described(description.descriptor): description.description_type.buffer", "initial_descriptor = mapping.target_descriptor target_candidates = [] for objekt in workspace.target.objects: for description in", "top_down_description_scout(codelet): description_type = codelet.arguments[0] chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) descriptions =", ") assert object_from_initial.spans_string() == object_from_target.spans_string() # get the posible concept mappings concept_mappings =", "next_object: continue if right_bond.direction_category == group.direction_category: continue incompatible_bonds += [right_bond] next_object = objekt", "== 0 changed_objects = [o for o in workspace.initial.objects if o.changed] # assert", "containing these objects incompatible_groups = group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups, group, \"Groups\", 1.0, 1.0) for", "forward_bond = source_descriptor.get_bond_category(destination_descriptor) if forward_bond == slipnet.identity: forward_bond = slipnet.sameness backward_bond = slipnet.sameness", "# break incompatible group and bond if they exist if incompatible_bond: 
incompatible_bond.break_the_structure() if", "( not target_in_objects and (not (want_flip and target_not_flipped)) ) if correspondence.reflexive(): # if", "= formulas.select_list_position(values) chosen_property = descriptions[i] coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def description_strength_tester(codelet):", "bond {bond}\") bond.build_bond() # pylint: disable=too-many-branches # pylint: disable=too-many-statements def top_down_group_scout__category(codelet): group_category =", "in incompatible_bonds: incompatible.break_the_structure() # create new bonds group.bond_list = [] for i in", "formulas.choose_relevant_description_by_activation(chosen_object) assert description sliplinks = formulas.similar_property_links(description.descriptor) assert sliplinks values = [ sliplink.degree_of_association() *", "[o for o in workspace.initial.objects if isinstance(o, Letter)] letter_of_initial_string = random.choice(letters) logging.info(f\"selected letter", "[] if bond.left_object.leftmost or bond.right_object.rightmost: if bond.direction_category: incompatible_correspondences = bond.get_incompatible_correspondences() if incompatible_correspondences: logging.info(\"trying", "choose a random bond from list chosen_bond = random.choice(bonds) category = chosen_bond.category direction_category", "[left_bond] previous = objekt next_object = group.object_list[-1] for objekt in reversed(group.object_list[:-1]): right_bond =", ") # noinspection PyStringFormat def group_scout__whole_string(codelet): string = workspace.initial if random.random() > 0.5:", "group.update_strength() strength = group.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability", "object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) flip_target_object = True coderack.propose_correspondence( object_from_initial, object_from_target, 
concept_mappings, flip_target_object, codelet, )", "there of the same direction incompatible_bonds = [] # incompatible bond list if", "are there of the same direction incompatible_bonds = [] # incompatible bond list", "source.left_bond: continue if source.left_bond.category != category: continue if source.left_bond.direction_category != direction: if source.left_bond.direction_category:", "density > 0.8: distribution = [5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0,", "= codelet.arguments[0] description.descriptor.buffer = 100.0 description.update_strength() strength = description.total_strength probability = formulas.temperature_adjusted_probability(strength /", "object_from_initial in workspace.objects target_in_objects = object_from_target in workspace.objects assert initial_in_objects or ( not", "formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability # activate some concepts for mapping", "objekt in reversed(group.object_list[:-1]): right_bond = objekt.right_bond if right_bond: if right_bond.right_object == next_object: continue", "incompatible in incompatibles: incompatible.break_the_structure() # break incompatible group and bond if they exist", "= sliplinks[i] chosen_property = chosen.destination coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def top_down_description_scout(codelet):", "= True destination = source while search: search = False if not destination.right_bond:", "bond.right_object.rightmost: if bond.direction_category: incompatible_correspondences = bond.get_incompatible_correspondences() if incompatible_correspondences: logging.info(\"trying to break incompatible correspondences\")", "> rhs def __fight(structure, structure_weight, incompatibles, incompatible_weight): if not (incompatibles and len(incompatibles)): return", "if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return rule.update_strength() assert rule.total_strength # fight 
against other rules if", "= description.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet, strength) def", "codelet ) def group_strength_tester(codelet): # update strength value of the group group =", "direction_category, bond_facet, bonds) assert bonds group_category = category.get_related_node(slipnet.group_category) coderack.propose_group( objects, bonds, group_category, direction_category,", "object_list += [position] letter = changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter = [ o for o in", "1: bond_density = 1.0 else: number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds) nearly_total_length = len(workspace.initial)", "source while search: search = False if not destination.right_bond: continue if destination.right_bond.category !=", "there is an incompatible rule, fight against it incompatible_rule = None if workspace.rule:", "structure_weight, incompatibles, incompatible_weight): if not (incompatibles and len(incompatibles)): return True for incompatible in", "position = changed.get_descriptor(slipnet.string_position_category) if position: object_list += [position] letter = changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter =", "logging.info(f\"failed to break {name}: Fizzle\") return False logging.info(f\"no incompatible {name}\") return True def", "should this be += ?? 
assert object_list # use conceptual depth to choose", "+ len(workspace.target.bonds) nearly_total_length = len(workspace.initial) + len(workspace.target) - 2 bond_density = number_of_bonds /", "= slipnode for mapping in workspace.slippages(): if mapping.initial_descriptor == slipnode: initial_descriptor = mapping.target_descriptor", "target.leftmost or target.rightmost: # search for the incompatible bond incompatible_bond = correspondence.get_incompatible_bond() if", "for {type_name}\") else: logging.info(f\"initial string selected: {workspace.initial} for {type_name}\") source = choose_unmodified_object(\"intra_string_salience\", string.objects)", "source.left_bond else: first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond2\") else: logging.info(f\"first_bond2: {first_bond}\")", "0.8: distribution = [5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]", "object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target want_flip = correspondence.flip_target_object if want_flip: flipper =", "incompatible_groups = group.get_incompatible_groups() assert __fight_incompatibles(incompatible_groups, group, \"Groups\", 1.0, 1.0) for incompatible in incompatible_bonds:", "description.descriptor.buffer = 100.0 description.update_strength() strength = description.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert", "for incompatible in incompatible_groups: incompatible.break_the_structure() for incompatible in incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building bond {bond}\")", "* random.random() logging.info(f\"{weighted_strength1} > {rhs}: {weighted_strength1 > rhs}\") return weighted_strength1 > rhs def", "of its type in the string object_list = [] position = changed.get_descriptor(slipnet.string_position_category) if", "descriptors & fizzle\") group.activate_descriptions() 
equivalent.add_descriptions(group.descriptions) return # check to see if all objects", "the posible concept mappings concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) assert", "chosen_object __show_which_string_object_is_from(chosen_object) description = formulas.choose_relevant_description_by_activation(chosen_object) assert description sliplinks = formulas.similar_property_links(description.descriptor) assert sliplinks values", "= 1.0 cutoff = __get_cut_off(bond_density) * 10.0 assert cutoff >= formulas.actual_temperature if workspace.rule.build_translated_rule():", "source = source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet ) # noinspection", "distinguishing distinguishing_mappings = [m for m in concept_mappings if m.distinguishing()] assert distinguishing_mappings #", ") weighted_strength2 = formulas.temperature_adjusted_value( structure2.total_strength * weight2 ) rhs = (weighted_strength1 + weighted_strength2)", "= relations[diff] logging.info(f\"Relation found: {relation.name}\") else: relation = None logging.info(\"no relation found\") letter_of_initial_string.replacement", "slipnet.right if mydirection == slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond if", "= group.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability # it", "(want_flip and target_not_flipped)) ) if correspondence.reflexive(): # if the correspondence exists, activate concept", "[source], [], ) probability = group.single_letter_group_probability() assert random.random() >= probability coderack.propose_single_letter_group(source, codelet) return", "source_descriptor, codelet, ) def top_down_bond_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction, 
formulas.local_direction_category_relevance,", "= object_from_initial in workspace.objects target_in_objects = object_from_target in workspace.objects assert initial_in_objects or (", "1.0, incompatible_group, 1.0 ) # if there is an incompatible rule, fight against", "in distinguishing_mappings if m.initial_description_type == slipnet.string_position_category and m.initial_description_type != slipnet.bond_facet ] initial_description_types =", "codelets def breaker(): probability_of_fizzle = (100.0 - formulas.Temperature) / 100.0 assert not formulas.coin_flip(probability_of_fizzle)", "__show_which_string_object_is_from(chosen_object) description = formulas.choose_relevant_description_by_activation(chosen_object) assert description sliplinks = formulas.similar_property_links(description.descriptor) assert sliplinks values =", "are still there for o in group.object_list: assert o in workspace.objects # check", "f\"target : relevance = {target_relevance}, \" f\"unhappiness = {int(target_unhappiness)}\" ) string = workspace.initial", "the first object # ie. 
string-position (left-,right-most,middle or whole) or letter category #", "category # if it is the only one of its type in the", "[] objects = [leftmost] while leftmost.right_bond: bonds += [leftmost.right_bond] leftmost = leftmost.right_bond.right_object objects", "coderack.propose_bond( destination, source, category, bond_facet, destination_descriptor, source_descriptor, codelet, ) def top_down_bond_scout__direction(codelet): direction =", "source.left_bond else: first_bond = source.right_bond if not first_bond or first_bond.category != category: if", "# bond found - fight against it assert __structure_versus_structure( correspondence, 3.0, incompatible_bond, 2.0", "= formulas.temperature_adjusted_probability( structure.total_strength / 100.0 ) if formulas.coin_flip(break_probability): return for structure in break_objects:", "else: description.build() def bottom_up_bond_scout(codelet): source = choose_unmodified_object(\"intra_string_salience\", workspace.objects) __show_which_string_object_is_from(source) destination = choose_neighbour(source) assert", "if len(incompatible_bonds): logging.info(str(incompatible_bonds[0])) assert __fight_incompatibles(incompatible_bonds, bond, \"bonds\", 1.0, 1.0) incompatible_groups = bond.source.get_common_groups(bond.destination) assert", "# and add new ones to the existing corr. 
existing = correspondence.object_from_initial.correspondence for", "assert destination bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination", "m in mappings if m.label != slipnet.opposite]) == 0 def __structure_versus_structure(structure1, weight1, structure2,", "# use conceptual depth to choose a relation value_list = [] for node", "source.left_bond.direction_category source = source.left_bond.left_object search = True # find rightmost object in group", "= source.right_bond.direction_category destination = destination.right_bond.right_object search = True assert destination != source logging.info(f\"proposing", "strength = correspondence.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability #", "{source}\") assert not source.spans_string() if source.leftmost: mydirection = slipnet.right elif source.rightmost: mydirection =", "[value] i = formulas.select_list_position(value_list) relation = object_list[i] coderack.propose_rule( slipnet.letter_category, descriptor, slipnet.letter, relation, codelet", "pylint: disable=too-many-branches # pylint: disable=too-many-statements def top_down_group_scout__category(codelet): group_category = codelet.arguments[0] category = group_category.get_related_node(slipnet.bond_category)", "strength) def group_builder(codelet): # update strength value of the group group = codelet.arguments[0]", "description.descriptor == initial_descriptor: target_candidates += [objekt] assert target_candidates object_from_target = choose_unmodified_object( \"inter_string_salience\", target_candidates", "[], ) probability = group.single_letter_group_probability() assert random.random() >= probability coderack.propose_single_letter_group(source, codelet) return direction", "1.0) # fight incompatible groups # fight all groups containing these objects incompatible_groups", "while 
leftmost.right_bond: bonds += [leftmost.right_bond] leftmost = leftmost.right_bond.right_object objects += [leftmost] assert leftmost.rightmost", "def description_builder(codelet): description = codelet.arguments[0] assert description.object in workspace.objects if description.object.described(description.descriptor): description.description_type.buffer =", "correspondence = codelet.arguments[0] object_from_initial = correspondence.object_from_initial object_from_target = correspondence.object_from_target assert object_from_initial in workspace.objects", "o in workspace.initial.objects if isinstance(o, Letter)] letter_of_initial_string = random.choice(letters) logging.info(f\"selected letter in initial", "incompatible_correspondences, 3.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() for incompatible in incompatible_groups: incompatible.break_the_structure() for", "is the only one of its type in the string object_list = []", "so fizzling\" ) return position = letter_of_initial_string.left_index more_letters = [ o for o", "= {0: slipnet.sameness, -1: slipnet.successor, 1: slipnet.predecessor} relation = relations[diff] logging.info(f\"Relation found: {relation.name}\")", "\"initial\" return \"other\" def __get_scout_source(slipnode, relevance_method, type_name): initial_relevance = relevance_method(workspace.initial, slipnode) target_relevance =", "workspace.objects for string_bond in bond.string.bonds: if bond.same_neighbours(string_bond) and bond.same_categories(string_bond): if bond.direction_category: bond.direction_category.buffer =", "chosen = {source}\") assert not source.spans_string() if source.leftmost: mydirection = slipnet.right elif source.rightmost:", "return True def __slippability(concept_mappings): for mapping in concept_mappings: slippiness = mapping.slippability() / 100.0", "= slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not 
formulas.select_list_position(activations): mydirection", "group.direction_category == slipnet.right: source = object1 destination = object2 else: source = object2", "(relevances + unhappinesses) initials = initial_relevance + initial_unhappiness if randomized > initials: string", "[] slippages = workspace.slippages() for node in object_list: node = node.apply_slippages(slippages) if target_object.described(node):", "propose this object group = leftmost coderack.propose_group( group.object_list, group.bond_list, group.group_category, group.direction_category, group.facet, codelet,", "object in the workspace # object_list = the union of this and the", "incompatibles, structure, name, structure_weight, incompatible_weight ): if len(incompatibles): if __fight(structure, structure_weight, incompatibles, incompatible_weight):", "= [5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif density", "bond incompatible_group = target.group if incompatible_group: assert __structure_versus_structure( correspondence, 1.0, incompatible_group, 1.0 )", "= None # find leftmost object in group with these bonds search =", "continue if destination.right_bond.category != category: continue if destination.right_bond.direction_category != direction: if destination.right_bond.direction_category: continue", "= formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage): return True return False # start the actual codelets", "description.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet, strength) def description_builder(codelet):", "if destination.right_bond.direction_category != direction: if destination.right_bond.direction_category: continue if not bond_facet or bond_facet ==", "= destination.get_descriptor(bond_facet) assert source_descriptor assert destination_descriptor return source_descriptor, 
destination_descriptor def __all_opposite_mappings(mappings): return len([m", "probability coderack.new_codelet(\"rule-builder\", codelet, rule.total_strength, rule) def replacement_finder(): # choose random letter in initial", "depth to choose a relation value_list = [] for node in object_list: depth", ") rhs = (weighted_strength1 + weighted_strength2) * random.random() logging.info(f\"{weighted_strength1} > {rhs}: {weighted_strength1 >", "1.0) # fight all incompatible correspondences incompatible_correspondences = [] if bond.left_object.leftmost or bond.right_object.rightmost:", "bonds += [leftmost.right_bond] leftmost = leftmost.right_bond.right_object objects += [leftmost] assert leftmost.rightmost # choose", "codelet) changed = changed_objects[-1] # generate a list of distinguishing descriptions for the", "object1 destination = object2 else: source = object2 destination = object1 category =", "changed_objects[-1] # generate a list of distinguishing descriptions for the first object #", "direction == slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond", "- fight against it assert __structure_versus_structure( correspondence, 3.0, incompatible_bond, 2.0 ) # won", "= __get_scout_source( category, formulas.local_bond_category_relevance, \"bond\" ) destination = choose_neighbour(source) logging.info(f\"source: {source}, destination: {destination}\")", "direction = source.left_bond.direction_category source = source.left_bond.left_object search = True destination = source search", "= leftmost.group if leftmost.spans_string(): # the object already spans the string - propose", "and bond.same_categories(string_bond): if bond.direction_category: bond.direction_category.buffer = 100.0 bond.category.buffer = 100.0 logging.info(\"already exists: activate", "= codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"direction\" ) logging.info(f\"source chosen = {source}\")", 
"else: number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds) nearly_total_length = len(workspace.initial) + len(workspace.target) - 2", "group.object_list: assert o in workspace.objects # check to see if bonds are there", "/ 100.0 assert not formulas.coin_flip(probability_of_fizzle) # choose a structure at random structures =", "+= [source.right_bond.right_object] source = source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet )", "# pylint: disable=too-many-statements def top_down_group_scout__category(codelet): group_category = codelet.arguments[0] category = group_category.get_related_node(slipnet.bond_category) assert category", "logging.info(f\"source descriptor: {source_descriptor.name.upper()}\") logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\") category = source_descriptor.get_bond_category(destination_descriptor) assert category if category", "# check to see if bonds are there of the same direction incompatible_bonds", "node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) descriptor = object_list[i]", "= formulas.temperature_adjusted_value( structure1.total_strength * weight1 ) weighted_strength2 = formulas.temperature_adjusted_value( structure2.total_strength * weight2 )", "structures structure = random.choice(structures) __show_which_string_object_is_from(structure) break_objects = [structure] if isinstance(structure, Bond): if structure.source.group:", "= correspondence.get_incompatible_bond() if incompatible_bond: # bond found - fight against it assert __structure_versus_structure(", "structure_weight, incompatible_weight ): if len(incompatibles): if __fight(structure, structure_weight, incompatibles, incompatible_weight): logging.info(f\"broke the {name}\")", "changed objects, propose a rule with no changes if not changed_objects: return 
coderack.propose_rule(None,", "opposites = [ m for m in distinguishing_mappings if m.initial_description_type == slipnet.string_position_category and", "= objekt next_object = group.object_list[-1] for objekt in reversed(group.object_list[:-1]): right_bond = objekt.right_bond if", "object in group with these bonds search = True while search: search =", "1.0: bond_density = 1.0 cutoff = __get_cut_off(bond_density) * 10.0 assert cutoff >= formulas.actual_temperature", "0.4: distribution = [1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0]", "break_objects: structure.break_the_structure() def bottom_up_description_scout(codelet): chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) description =", "ord(workspace.modified_string[position]) diff = initial_ascii - modified_ascii if abs(diff) < 2: relations = {0:", "if forward_bond == slipnet.identity: forward_bond = slipnet.sameness backward_bond = slipnet.sameness else: backward_bond =", "destination: {destination}\") assert destination bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet,", "for o in workspace.initial.objects if isinstance(o, Letter)] letter_of_initial_string = random.choice(letters) logging.info(f\"selected letter in", "in incompatibles: if not __structure_versus_structure( structure, structure_weight, incompatible, incompatible_weight ): logging.info(f\"lost fight with", "__structure_versus_structure( correspondence, correspondence_spans, incompatible, incompatible_spans ) incompatible_bond = None incompatible_group = None #", "category = codelet.arguments[0] source = __get_scout_source( category, formulas.local_bond_category_relevance, \"bond\" ) destination = choose_neighbour(source)", "True def __slippability(concept_mappings): for mapping in concept_mappings: slippiness = mapping.slippability() / 100.0 probability_of_slippage", 
"this be += ?? assert object_list # use conceptual depth to choose a", "= workspace.initial.intra_string_unhappiness target_unhappiness = workspace.target.intra_string_unhappiness logging.info( f\"initial : relevance = {initial_relevance}, \" f\"unhappiness", "== slipnet.sameness: leftmost = leftmost.group if leftmost.spans_string(): # the object already spans the", "__structure_versus_structure( structure, structure_weight, incompatible, incompatible_weight ): logging.info(f\"lost fight with {incompatible}\") return False logging.info(f\"won", "the group group = codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength() strength = group.total_strength probability = formulas.temperature_adjusted_probability(strength", "= __get_scout_source( direction, formulas.local_direction_category_relevance, \"direction\" ) logging.info(f\"source chosen = {source}\") assert not source.spans_string()", "new_list # should this be += ?? assert object_list # use conceptual depth", "assert not source.spans_string() if source.leftmost: direction = slipnet.right elif source.rightmost: direction = slipnet.left", "description = codelet.arguments[0] description.descriptor.buffer = 100.0 description.update_strength() strength = description.total_strength probability = formulas.temperature_adjusted_probability(strength", "incompatible correspondences\") assert __fight(bond, 2.0, incompatible_correspondences, 3.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() for", "elif density > 0.2: distribution = [1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0,", "if want_flip: flipper = object_from_target.flipped_version() target_not_flipped = not workspace.target.equivalent_group(flipper) else: target_not_flipped = False", "[node] object_list = new_list # should this be += ?? 
assert object_list #", "if source.left_bond.direction_category: continue if not bond_facet or bond_facet == source.left_bond.facet: bond_facet = source.left_bond.facet", "category, formulas.local_bond_category_relevance, \"group\" ) assert source assert not source.spans_string() if source.leftmost: direction =", "== next_object: continue if right_bond.direction_category == group.direction_category: continue incompatible_bonds += [right_bond] next_object =", "/ 100.0 probability_of_slippage = formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage): return True return False # start", "== 1: bond_density = 1.0 else: number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds) nearly_total_length =", "coderack.propose_correspondence( object_from_initial, object_from_target, concept_mappings, flip_target_object, codelet, ) def important_object_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"relative_importance\",", "1.0 ) for incompatible in incompatibles: incompatible.break_the_structure() # break incompatible group and bond", "o.get_description_type(letter) ] if not len(other_objects_of_same_letter): object_list += [letter] # if this object corresponds", "with these bonds search = True destination = source while search: search =", "group.bond_list = [] for i in range(1, len(group.object_list)): object1 = group.object_list[i - 1]", "[object1.right_bond] for incompatible in incompatible_groups: incompatible.break_the_structure() group.build_group() group.activate_descriptions() logging.info(\"building group\") def rule_builder(codelet): rule", "= __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination ) forward_bond =", "descriptions for the first object # ie. 
string-position (left-,right-most,middle or whole) or letter", "-= 1 initial_ascii = ord(workspace.initial_string[position]) modified_ascii = ord(workspace.modified_string[position]) diff = initial_ascii - modified_ascii", "= {letter_of_initial_string}\") if letter_of_initial_string.replacement: logging.info( f\"Replacement already found for {letter_of_initial_string}, so fizzling\" )", "= slipnet.right if mydirection == slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond", "& the highest group to which it belongs leftmost = None for objekt", "= choose_bond_facet(source, destination) assert bond_facet return bond_facet def __get_descriptors(bond_facet, source, destination): source_descriptor =", "in workspace.target.objects: for description in objekt.relevant_descriptions(): if description.descriptor == initial_descriptor: target_candidates += [objekt]", "formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) relation = object_list[i] coderack.propose_rule( slipnet.letter_category, descriptor,", "formulas.coin_flip(probability) bond.facet.buffer = 100.0 bond.source_descriptor.buffer = 100.0 bond.destination_descriptor.buffer = 100.0 logging.info(\"succeeded: posting bond-builder\")", "destination: bonds += [source.right_bond] objects += [source.right_bond.right_object] source = source.right_bond.right_object coderack.propose_group( objects, bonds,", "!= changed and o.get_description_type(letter) ] if not len(other_objects_of_same_letter): object_list += [letter] # if", "__fight_incompatibles(incompatible_groups, bond, \"groups\", 1.0, 1.0) # fight all incompatible correspondences incompatible_correspondences = []", "group.direction_category: group.direction_category.buffer = 100.0 coderack.new_codelet(\"group-builder\", codelet, strength) def group_builder(codelet): # update strength value", "\"groups\", 1.0, 1.0) # fight all incompatible correspondences 
incompatible_correspondences = [] if bond.left_object.leftmost", "== position ] letter_of_modified_string = more_letters and more_letters[0] or None assert letter_of_modified_string position", "= True bond_facet = None # find leftmost object in group with these", "bond incompatible_bond = correspondence.get_incompatible_bond() if incompatible_bond: # bond found - fight against it", "[structure] if isinstance(structure, Bond): if structure.source.group: if structure.source.group == structure.destination.group: break_objects += [structure.source.group]", "logging.info(f\"initial string selected: {workspace.initial}\") # find leftmost object & the highest group to", "equivalent: logging.info(\"already exists...activate descriptors & fizzle\") group.activate_descriptions() equivalent.add_descriptions(group.descriptions) return # check to see", "break_objects = [structure] if isinstance(structure, Bond): if structure.source.group: if structure.source.group == structure.destination.group: break_objects", "category == slipnet.sameness and isinstance(source, Letter): group = Group( source.string, slipnet.sameness_group, None, slipnet.letter_category,", "= __get_descriptors( bond_facet, source, destination ) forward_bond = source_descriptor.get_bond_category(destination_descriptor) if forward_bond == slipnet.identity:", "100.0 bond.source_descriptor.buffer = 100.0 bond.destination_descriptor.buffer = 100.0 logging.info(\"succeeded: posting bond-builder\") coderack.new_codelet(\"bond-builder\", codelet, strength)", "concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) flip_target_object = True coderack.propose_correspondence( object_from_initial,", "the union of this and the distingushing descriptors if changed.correspondence: target_object = changed.correspondence.object_from_target", "first object # ie. 
string-position (left-,right-most,middle or whole) or letter category # if", "workspace.objects target_in_objects = object_from_target in workspace.objects assert initial_in_objects or ( not target_in_objects and", "from .workspace_formulas import choose_bond_facet from .workspace_formulas import choose_directed_neighbor from .workspace_formulas import choose_neighbour from", "destination) assert bond_facet return bond_facet def __get_descriptors(bond_facet, source, destination): source_descriptor = source.get_descriptor(bond_facet) destination_descriptor", ") else: coderack.propose_bond( destination, source, category, bond_facet, destination_descriptor, source_descriptor, codelet, ) def top_down_bond_scout__direction(codelet):", "source.right_bond if not first_bond or first_bond.category != category: # check the other side", "+= [node] object_list = new_list # should this be += ?? assert object_list", "for {bond}\") assert formulas.coin_flip(probability) bond.facet.buffer = 100.0 bond.source_descriptor.buffer = 100.0 bond.destination_descriptor.buffer = 100.0", "[leftmost] assert leftmost.rightmost # choose a random bond from list chosen_bond = random.choice(bonds)", "description.description_type.buffer = 100.0 description.descriptor.buffer = 100.0 else: description.build() def bottom_up_bond_scout(codelet): source = choose_unmodified_object(\"intra_string_salience\",", "1: previous = group.object_list[0] for objekt in group.object_list[1:]: left_bond = objekt.left_bond if left_bond:", "formulas from . 
import temperature from .bond import Bond from .bond import possible_group_bonds", "= [1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0] elif density", "group group = codelet.arguments[0] __show_which_string_object_is_from(group) group.update_strength() strength = group.total_strength probability = formulas.temperature_adjusted_probability(strength /", "# noinspection PyStringFormat def group_scout__whole_string(codelet): string = workspace.initial if random.random() > 0.5: string", "logging.info(f\"target string selected: {workspace.target} for {type_name}\") else: logging.info(f\"initial string selected: {workspace.initial} for {type_name}\")", "incompatible.break_the_structure() for incompatible in incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building bond {bond}\") bond.build_bond() # pylint: disable=too-many-branches", "rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return rule.update_strength() assert rule.total_strength # fight against other rules if workspace.rule:", "= source.left_bond.direction_category source = source.left_bond.left_object search = True destination = source search =", "initial_relevance + initial_unhappiness if randomized > initials: string = workspace.target logging.info(f\"target string selected:", "[source.right_bond.right_object] source = source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet ) #", "= codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() strength = bond.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) logging.info(f\"bond", "density > 0.2: distribution = [1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0,", "workspace.slippages(): if mapping.initial_descriptor == slipnode: initial_descriptor = mapping.target_descriptor target_candidates = [] for objekt", "= object_from_target in workspace.objects assert initial_in_objects or ( not 
target_in_objects and (not (want_flip", "m.initial_description_type == slipnet.string_position_category and m.initial_description_type != slipnet.bond_facet ] initial_description_types = [m.initial_description_type for m", "if incompatibles: correspondence_spans = ( correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span() ) for incompatible in incompatibles:", "= choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects ) object_from_target = choose_unmodified_object( \"inter_string_salience\", workspace.target.objects ) assert object_from_initial.spans_string()", "from .coderack import coderack from .correspondence import Correspondence from .group import Group from", "group.direction_category: continue incompatible_bonds += [right_bond] next_object = objekt # if incompatible bonds exist", "initials: string = workspace.target logging.info(f\"target string selected: {workspace.target} for {type_name}\") else: logging.info(f\"initial string", "activate some concepts for mapping in correspondence.concept_mappings: mapping.initial_description_type.buffer = 100.0 mapping.initial_descriptor.buffer = 100.0", "fight all incompatible correspondences incompatible_correspondences = [] if bond.left_object.leftmost or bond.right_object.rightmost: if bond.direction_category:", "distribution = [1.0, 1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0] stop", "to choose a relation value_list = [] for node in object_list: depth =", "bond_descriptors logging.info(f\"source descriptor: {source_descriptor.name.upper()}\") logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\") category = source_descriptor.get_bond_category(destination_descriptor) assert category if", "incompatible correspondences incompatible_correspondences = [] if bond.left_object.leftmost or bond.right_object.rightmost: if bond.direction_category: incompatible_correspondences =", "assert __fight_incompatibles(incompatible_bonds, 
group, \"bonds\", 1.0, 1.0) # fight incompatible groups # fight all", "group.object_list[i] if not object1.right_bond: if group.direction_category == slipnet.right: source = object1 destination =", "initial_description_types and __all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation != 100.0 ): object_from_target = object_from_target.flipped_version() concept_mappings =", "incompatible in incompatible_groups: incompatible.break_the_structure() for incompatible in incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building bond {bond}\") bond.build_bond()", "not first_bond.direction_category: direction = None assert first_bond assert first_bond.direction_category == direction logging.info(f\"possible group:", "from . import temperature from .bond import Bond from .bond import possible_group_bonds from", "check to see if all objects are still there for o in group.object_list:", "= __get_bond_facet(source, destination) logging.info(f\"chosen bond facet: {bond_facet.get_name()}\") logging.info(f\"Source: {source}, destination: {destination}\") bond_descriptors =", "incompatible in incompatibles: if not __structure_versus_structure( structure, structure_weight, incompatible, incompatible_weight ): logging.info(f\"lost fight", "group.object_list[1:]: left_bond = objekt.left_bond if left_bond: if left_bond.left_object == previous: continue if left_bond.direction_category", "assert slipnode initial_descriptor = slipnode for mapping in workspace.slippages(): if mapping.initial_descriptor == slipnode:", "destination = destination.right_bond.right_object search = True assert destination != source objects = [source]", "= [] for objekt in workspace.target.objects: for description in objekt.relevant_descriptions(): if description.descriptor ==", "value = formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) relation = object_list[i] coderack.propose_rule(", "assert 
first_bond assert first_bond.direction_category == direction logging.info(f\"possible group: {first_bond}\") category = first_bond.category assert", "assert __structure_versus_structure( correspondence, 1.0, incompatible_group, 1.0 ) # if there is an incompatible", "\"inter_string_salience\", target_candidates ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get the posible concept mappings", "source.get_descriptor(facet), destination.get_descriptor(facet), ) new_bond.build_bond() group.bond_list += [object1.right_bond] for incompatible in incompatible_groups: incompatible.break_the_structure() group.build_group()", "= correspondence.object_from_initial object_from_target = correspondence.object_from_target assert object_from_initial in workspace.objects assert ( object_from_target in", "from .group import Group from .letter import Letter from .replacement import Replacement from", "and slipnet.direction_category in initial_description_types and __all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation != 100.0 ): object_from_target =", "source = source.left_bond.left_object search = True # find rightmost object in group with", "source.left_bond.direction_category source = source.left_bond.left_object search = True destination = source search = True", ".group import Group from .letter import Letter from .replacement import Replacement from .slipnet", "source search = True while search: search = False if not destination.right_bond: continue", "100.0 ) if formulas.coin_flip(break_probability): return for structure in break_objects: structure.break_the_structure() def bottom_up_description_scout(codelet): chosen_object", "for objekt in workspace.target.objects: for description in objekt.relevant_descriptions(): if description.descriptor == initial_descriptor: target_candidates", "codelet.arguments[0] chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object 
__show_which_string_object_is_from(chosen_object) descriptions = chosen_object.get_possible_descriptions(description_type) assert descriptions", "if right_bond.direction_category == group.direction_category: continue incompatible_bonds += [right_bond] next_object = objekt # if", "[objekt] assert target_candidates object_from_target = choose_unmodified_object( \"inter_string_salience\", target_candidates ) assert object_from_initial.spans_string() == object_from_target.spans_string()", "highest group to which it belongs leftmost = None for objekt in string.objects:", "import logging import random from . import formulas from . import temperature from", "\"group\" ) assert source assert not source.spans_string() if source.leftmost: direction = slipnet.right elif", "structure2.update_strength() weighted_strength1 = formulas.temperature_adjusted_value( structure1.total_strength * weight1 ) weighted_strength2 = formulas.temperature_adjusted_value( structure2.total_strength *", "probability coderack.propose_single_letter_group(source, codelet) return direction = first_bond.direction_category search = True bond_facet = None", "from .workspace_formulas import choose_neighbour from .workspace_formulas import choose_unmodified_object from .workspace_formulas import workspace from", "else: relation = None logging.info(\"no relation found\") letter_of_initial_string.replacement = Replacement( letter_of_initial_string, letter_of_modified_string, relation", "# choose random letter in initial string letters = [o for o in", "= workspace.slippages() for node in object_list: node = node.apply_slippages(slippages) if target_object.described(node): if target_object.distinguishing_descriptor(node):", "for objekt in group.object_list[1:]: left_bond = objekt.left_bond if left_bond: if left_bond.left_object == previous:", "( object_from_initial.spans_string() and object_from_target.spans_string() and slipnet.direction_category in initial_description_types and 
__all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation !=", ") return position = letter_of_initial_string.left_index more_letters = [ o for o in workspace.modified.objects", "__structure_versus_structure( correspondence, 1.0, incompatible_group, 1.0 ) # if there is an incompatible rule,", "for structure in break_objects: break_probability = formulas.temperature_adjusted_probability( structure.total_strength / 100.0 ) if formulas.coin_flip(break_probability):", "strong enough - post builder & activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0 if group.direction_category:", "flipper = object_from_target.flipped_version() target_not_flipped = not workspace.target.equivalent_group(flipper) else: target_not_flipped = False initial_in_objects =", "+ initial_unhappiness if randomized > initials: string = workspace.target logging.info(f\"target string selected: {workspace.target}", "= slipnet.sameness backward_bond = slipnet.sameness else: backward_bond = destination_descriptor.get_bond_category(source_descriptor) assert category in [forward_bond,", "bond_facet, codelet ) def top_down_group_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance,", "workspace.initial if random.random() > 0.5: string = workspace.target logging.info(f\"target string selected: {workspace.target}\") else:", "objects span the strings, check to see if the # string description needs", "def bottom_up_description_scout(codelet): chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) description = formulas.choose_relevant_description_by_activation(chosen_object) assert", "bond_facet = source.left_bond.facet direction = source.left_bond.direction_category source = source.left_bond.left_object search = True destination", "= codelet.arguments[0] 
chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) descriptions = chosen_object.get_possible_descriptions(description_type) assert", "logging.info(f\"destination descriptor: {destination_descriptor.name.upper()}\") category = source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category", "def top_down_group_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"direction\" ) logging.info(f\"source", "else: logging.info(f\"first_bond2: {first_bond}\") if first_bond and not first_bond.direction_category: direction = None assert first_bond", "return source def __get_bond_facet(source, destination): bond_facet = choose_bond_facet(source, destination) assert bond_facet return bond_facet", "more_letters[0] or None assert letter_of_modified_string position -= 1 initial_ascii = ord(workspace.initial_string[position]) modified_ascii =", "position: object_list += [position] letter = changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter = [ o for o", "rule = codelet.arguments[0] rule.update_strength() probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0) assert random.random() <= probability", "category == forward_bond: coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) else:", "description.descriptor.buffer = 100.0 else: description.build() def bottom_up_bond_scout(codelet): source = choose_unmodified_object(\"intra_string_salience\", workspace.objects) __show_which_string_object_is_from(source) destination", "= [m for m in concept_mappings if m.distinguishing()] assert distinguishing_mappings # if both", "if direction == slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond if 
not", "100 temperature.clamped = True formulas.Temperature = 100.0 def bottom_up_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"inter_string_salience\",", "correspondence, 3.0, incompatible_bond, 2.0 ) # won against incompatible bond incompatible_group = target.group", "codelet.arguments[0] description.descriptor.buffer = 100.0 description.update_strength() strength = description.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0)", "no changed objects, propose a rule with no changes if not changed_objects: return", "= codelet.arguments[0] rule.update_strength() probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0) assert random.random() <= probability coderack.new_codelet(\"rule-builder\",", "letter_of_initial_string.replacement = Replacement( letter_of_initial_string, letter_of_modified_string, relation ) if relation != slipnet.sameness: letter_of_initial_string.changed =", "= correspondence.object_from_target want_flip = correspondence.flip_target_object if want_flip: flipper = object_from_target.flipped_version() target_not_flipped = not", "None # find leftmost object in group with these bonds while search: search", "source_descriptor, destination_descriptor, codelet, ) else: coderack.propose_bond( destination, source, category, bond_facet, destination_descriptor, source_descriptor, codelet,", "value_list += [value] i = formulas.select_list_position(value_list) descriptor = object_list[i] # choose the relation", "= object_list[i] # choose the relation (change the letmost object to \"successor\" or", "destination != source logging.info(f\"proposing group from {source} to {destination}\") objects = [source] bonds", "letter_of_modified_string position -= 1 initial_ascii = ord(workspace.initial_string[position]) modified_ascii = ord(workspace.modified_string[position]) diff = initial_ascii", "first_bond and not first_bond.direction_category: direction = None assert 
first_bond assert first_bond.direction_category == direction", "bond.facet.buffer = 100.0 bond.source_descriptor.buffer = 100.0 bond.destination_descriptor.buffer = 100.0 logging.info(\"succeeded: posting bond-builder\") coderack.new_codelet(\"bond-builder\",", "elif source.rightmost: direction = slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if", "logging.info( f\"initial : relevance = {initial_relevance}, \" f\"unhappiness = {int(initial_unhappiness)}\" ) logging.info( f\"target", "replacement\") def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category = codelet.arguments[0] source = __get_scout_source( category, formulas.local_bond_category_relevance, \"bond\"", "__show_which_string_object_is_from(group) group.update_strength() strength = group.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <=", "f\"unhappiness = {int(target_unhappiness)}\" ) string = workspace.initial relevances = initial_relevance + target_relevance unhappinesses", "{bond}\") assert formulas.coin_flip(probability) bond.facet.buffer = 100.0 bond.source_descriptor.buffer = 100.0 bond.destination_descriptor.buffer = 100.0 logging.info(\"succeeded:", "[] for objekt in workspace.target.objects: for description in objekt.relevant_descriptions(): if description.descriptor == initial_descriptor:", "assert destination logging.info(f\"to object: {destination}\") bond_facet = __get_bond_facet(source, destination) source_descriptor, destination_descriptor = __get_descriptors(", "list of distinguishing descriptions for the first object # ie. 
string-position (left-,right-most,middle or", "break {name}: Fizzle\") return False logging.info(f\"no incompatible {name}\") return True def __slippability(concept_mappings): for", "with {incompatible}\") return True def __fight_incompatibles( incompatibles, structure, name, structure_weight, incompatible_weight ): if", "destination = choose_neighbour(source) logging.info(f\"source: {source}, destination: {destination}\") assert destination bond_facet = __get_bond_facet(source, destination)", "= random.choice(bonds) category = chosen_bond.category direction_category = chosen_bond.direction_category bond_facet = chosen_bond.facet bonds =", "= destination_descriptor.get_bond_category(source_descriptor) assert category in [forward_bond, backward_bond] if category == forward_bond: coderack.propose_bond( source,", "sliplink.degree_of_association() * sliplink.destination.activation for sliplink in sliplinks ] i = formulas.select_list_position(values) chosen =", "these bonds while search: search = False if not source.left_bond: continue if source.left_bond.category", "incompatible_bond = None incompatible_group = None # if there is an incompatible bond", "fight against other rules if workspace.rule: assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0) workspace.build_rule(rule) def", "random.random() total = 0.0 for i in range(0, len(distribution)): total += distribution[i] if", "1 and len(workspace.target) == 1: bond_density = 1.0 else: number_of_bonds = len(workspace.initial.bonds) +", "= None if not first_bond or first_bond.direction_category != direction: if mydirection == slipnet.right:", "= formulas.temperature_adjusted_probability(strength / 100.0) logging.info(f\"bond strength = {strength} for {bond}\") assert formulas.coin_flip(probability) bond.facet.buffer", "[changed.replacement.relation] object_list += [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] # use conceptual depth to choose a", "while source != 
destination: bonds += [source.right_bond] objects += [source.right_bond.right_object] source = source.right_bond.right_object", "continue if not bond_facet or bond_facet == destination.right_bond.facet: bond_facet = destination.right_bond.facet direction =", "correspondence.object_from_initial.correspondence for mapping in correspondence.concept_mappings: if mapping.label: mapping.label.buffer = 100.0 if not mapping.is_contained_by(existing.concept_mappings):", "if category == slipnet.identity: category = slipnet.sameness coderack.propose_bond( source, destination, category, bond_facet, source_descriptor,", "object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) assert concept_mappings assert __slippability(concept_mappings) # find out if", "\"inter_string_salience\", workspace.initial.objects ) object_from_target = choose_unmodified_object( \"inter_string_salience\", workspace.target.objects ) assert object_from_initial.spans_string() == object_from_target.spans_string()", "= [1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0] else: distribution", "] i = formulas.select_list_position(values) chosen = sliplinks[i] chosen_property = chosen.destination coderack.propose_description( chosen_object, chosen_property.category(),", "leftmost.group and leftmost.group.bond_category == slipnet.sameness: leftmost = leftmost.group if leftmost.spans_string(): # the object", "m for m in distinguishing_mappings if m.initial_description_type == slipnet.string_position_category and m.initial_description_type != slipnet.bond_facet", "incompatible_bonds: incompatible.break_the_structure() for incompatible in incompatible_groups: incompatible.break_the_structure() for incompatible in incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building", "there is an incompatible bond then fight against it initial = correspondence.object_from_initial target", "source = object2 destination 
= object1 category = group.group_category.get_related_node(slipnet.bond_category) facet = group.facet new_bond", "\" f\"unhappiness = {int(target_unhappiness)}\" ) string = workspace.initial relevances = initial_relevance + target_relevance", "True logging.info(f\"failed to break {name}: Fizzle\") return False logging.info(f\"no incompatible {name}\") return True", "bond.source in workspace.objects or bond.destination in workspace.objects for string_bond in bond.string.bonds: if bond.same_neighbours(string_bond)", "= source.right_bond.right_object coderack.propose_group( objects, bonds, group_category, direction, bond_facet, codelet ) def top_down_group_scout__direction(codelet): direction", "weighted_strength2 = formulas.temperature_adjusted_value( structure2.total_strength * weight2 ) rhs = (weighted_strength1 + weighted_strength2) *", "= __get_descriptors( bond_facet, source, destination ) category = source_descriptor.get_bond_category(destination_descriptor) assert category if category", "None for objekt in string.objects: if objekt.leftmost: leftmost = objekt while leftmost.group and", "whole) or letter category # if it is the only one of its", "== slipnet.identity: category = slipnet.sameness logging.info(f\"proposing {category.name} bond \") coderack.propose_bond( source, destination, category,", "and isinstance(source, Letter): group = Group( source.string, slipnet.sameness_group, None, slipnet.letter_category, [source], [], )", "= object2 destination = object1 category = group.group_category.get_related_node(slipnet.bond_category) facet = group.facet new_bond =", "exist - fight group.update_strength() assert __fight_incompatibles(incompatible_bonds, group, \"bonds\", 1.0, 1.0) # fight incompatible", "= 100.0 def bottom_up_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects ) object_from_target = choose_unmodified_object(", "some concepts for mapping in 
correspondence.concept_mappings: mapping.initial_description_type.buffer = 100.0 mapping.initial_descriptor.buffer = 100.0 mapping.target_description_type.buffer", "logging.info(f\"Source: {source}, destination: {destination}\") bond_descriptors = __get_descriptors(bond_facet, source, destination) source_descriptor, destination_descriptor = bond_descriptors", "assert bond.source in workspace.objects or bond.destination in workspace.objects for string_bond in bond.string.bonds: if", "+= [letter] # if this object corresponds to another object in the workspace", "= object2 else: source = object2 destination = object1 category = group.group_category.get_related_node(slipnet.bond_category) facet", "for o in workspace.modified.objects if isinstance(o, Letter) and o.left_index == position ] letter_of_modified_string", "slipnet.sameness: letter_of_initial_string.changed = True workspace.changed_object = letter_of_initial_string logging.info(\"building replacement\") def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category", "backward_bond = destination_descriptor.get_bond_category(source_descriptor) assert category in [forward_bond, backward_bond] if category == forward_bond: coderack.propose_bond(", "leftmost.group.bond_category == slipnet.sameness: leftmost = leftmost.group if leftmost.spans_string(): # the object already spans", "continue if left_bond.direction_category == group.direction_category: continue incompatible_bonds += [left_bond] previous = objekt next_object", "sliplink.destination.activation for sliplink in sliplinks ] i = formulas.select_list_position(values) chosen = sliplinks[i] chosen_property", "bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() strength = bond.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0)", "relation ) if relation != slipnet.sameness: letter_of_initial_string.changed = True workspace.changed_object = 
letter_of_initial_string logging.info(\"building", "{source}, destination: {destination}\") bond_descriptors = __get_descriptors(bond_facet, source, destination) source_descriptor, destination_descriptor = bond_descriptors logging.info(f\"source", "= formulas.select_list_position(value_list) relation = object_list[i] coderack.propose_rule( slipnet.letter_category, descriptor, slipnet.letter, relation, codelet ) def", "= codelet.arguments[0] if rule.rule_equal(workspace.rule): rule.activate_rule_descriptions() return rule.update_strength() assert rule.total_strength # fight against other", "= group.facet new_bond = Bond( source, destination, category, facet, source.get_descriptor(facet), destination.get_descriptor(facet), ) new_bond.build_bond()", "correspondence, 1.0, incompatible_group, 1.0 ) # if there is an incompatible rule, fight", "= choose_directed_neighbor(source, direction) assert destination logging.info(f\"to object: {destination}\") bond_facet = __get_bond_facet(source, destination) source_descriptor,", "+= [value] i = formulas.select_list_position(value_list) relation = object_list[i] coderack.propose_rule( slipnet.letter_category, descriptor, slipnet.letter, relation,", "choose_unmodified_object( \"inter_string_salience\", workspace.target.objects ) assert object_from_initial.spans_string() == object_from_target.spans_string() # get the posible concept", "{type_name}\") source = choose_unmodified_object(\"intra_string_salience\", string.objects) return source def __get_bond_facet(source, destination): bond_facet = choose_bond_facet(source,", "temperature.clamped = True formulas.Temperature = 100.0 def bottom_up_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"inter_string_salience\", workspace.initial.objects", "= 100.0 coderack.new_codelet(\"correspondence-builder\", codelet, strength, correspondence) def correspondence_builder(codelet): correspondence = codelet.arguments[0] object_from_initial =", 
"coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def bond_strength_tester(codelet): bond =", "any are distinguishing distinguishing_mappings = [m for m in concept_mappings if m.distinguishing()] assert", "it incompatible_rule = None if workspace.rule: if workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule = workspace.rule assert __structure_versus_structure(", "direction = codelet.arguments[0] source = __get_scout_source( direction, formulas.local_direction_category_relevance, \"direction\" ) logging.info(f\"source chosen =", "first_bond.direction_category == direction logging.info(f\"possible group: {first_bond}\") category = first_bond.category assert category group_category =", "see if the # string description needs to be flipped opposites = [", "exists...activate descriptors & fizzle\") group.activate_descriptions() equivalent.add_descriptions(group.descriptions) return # check to see if all", "= objekt.left_bond if left_bond: if left_bond.left_object == previous: continue if left_bond.direction_category == group.direction_category:", "in incompatible_groups: incompatible.break_the_structure() group.build_group() group.activate_descriptions() logging.info(\"building group\") def rule_builder(codelet): rule = codelet.arguments[0] if", "in workspace.objects target_in_objects = object_from_target in workspace.objects assert initial_in_objects or ( not target_in_objects", "direction_category, bond_facet, codelet ) def group_strength_tester(codelet): # update strength value of the group", "coderack.new_codelet(\"group-builder\", codelet, strength) def group_builder(codelet): # update strength value of the group group", "= source.right_bond if not first_bond: logging.info(\"no first_bond2\") else: logging.info(f\"first_bond2: {first_bond}\") if first_bond and", "workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule = workspace.rule 
assert __structure_versus_structure( correspondence, 1.0, incompatible_rule, 1.0 ) for incompatible", "category, formulas.local_bond_category_relevance, \"bond\" ) destination = choose_neighbour(source) logging.info(f\"source: {source}, destination: {destination}\") assert destination", "1.0, incompatible_rule, 1.0 ) for incompatible in incompatibles: incompatible.break_the_structure() # break incompatible group", "for s in workspace.structures if isinstance(s, (Group, Bond, Correspondence)) ] assert structures structure", "[structure.source.group] # try to break all objects for structure in break_objects: break_probability =", "if group.direction_category == slipnet.right: source = object1 destination = object2 else: source =", "{incompatible}\") return False logging.info(f\"won fight with {incompatible}\") return True def __fight_incompatibles( incompatibles, structure,", "correspondence.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability # activate some", "initials = initial_relevance + initial_unhappiness if randomized > initials: string = workspace.target logging.info(f\"target", "\"bonds\", 1.0, 1.0) # fight incompatible groups # fight all groups containing these", "else: distribution = [1.0, 1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0]", "search: search = False if not destination.right_bond: continue if destination.right_bond.category != category: continue", "assert formulas.coin_flip(probability) bond.facet.buffer = 100.0 bond.source_descriptor.buffer = 100.0 bond.destination_descriptor.buffer = 100.0 logging.info(\"succeeded: posting", "if target_object.distinguishing_descriptor(node): new_list += [node] object_list = new_list # should this be +=", "100.0 ): object_from_target = object_from_target.flipped_version() concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), 
object_from_target.relevant_descriptions(), )", "0.5: string = workspace.target logging.info(f\"target string selected: {workspace.target}\") else: logging.info(f\"initial string selected: {workspace.initial}\")", "first_bond = source.left_bond else: first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond\") else:", "if not object1.right_bond: if group.direction_category == slipnet.right: source = object1 destination = object2", "= [o for o in workspace.initial.objects if isinstance(o, Letter)] letter_of_initial_string = random.choice(letters) logging.info(f\"selected", "= workspace.initial relevances = initial_relevance + target_relevance unhappinesses = initial_unhappiness + target_unhappiness randomized", "forward_bond = slipnet.sameness backward_bond = slipnet.sameness else: backward_bond = destination_descriptor.get_bond_category(source_descriptor) assert category in", "description_strength_tester(codelet): description = codelet.arguments[0] description.descriptor.buffer = 100.0 description.update_strength() strength = description.total_strength probability =", "if category == slipnet.sameness and isinstance(source, Letter): group = Group( source.string, slipnet.sameness_group, None,", "__get_cut_off(bond_density) * 10.0 assert cutoff >= formulas.actual_temperature if workspace.rule.build_translated_rule(): workspace.found_answer = True else:", "from {source} to {category.name}\") bond_facet = None # find leftmost object in group", "# activate some concepts for mapping in correspondence.concept_mappings: mapping.initial_description_type.buffer = 100.0 mapping.initial_descriptor.buffer =", "workspace.initial.objects if o.changed] # assert len(changed_objects) < 2 # if there are no", "there for o in group.object_list: assert o in workspace.objects # check to see", "o for o in workspace.modified.objects if isinstance(o, Letter) and o.left_index == position ]", "group with these bonds search = True destination = source while search: search", 
"= sum(distribution) * random.random() total = 0.0 for i in range(0, len(distribution)): total", "initial_description_types = [m.initial_description_type for m in opposites] flip_target_object = False if ( object_from_initial.spans_string()", "object_from_target.spans_string() and slipnet.direction_category in initial_description_types and __all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation != 100.0 ): object_from_target", "correspondence, 1.0, incompatible_rule, 1.0 ) for incompatible in incompatibles: incompatible.break_the_structure() # break incompatible", "object_list[i] # choose the relation (change the letmost object to \"successor\" or \"d\"", "elif density > 0.6: distribution = [2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0,", "if category == slipnet.identity: category = slipnet.sameness logging.info(f\"proposing {category.name} bond \") coderack.propose_bond( source,", "return \"other\" def __get_scout_source(slipnode, relevance_method, type_name): initial_relevance = relevance_method(workspace.initial, slipnode) target_relevance = relevance_method(workspace.target,", "conceptual depth to choose a relation value_list = [] for node in object_list:", "!= direction: if destination.right_bond.direction_category: continue if not bond_facet or bond_facet == destination.right_bond.facet: bond_facet", "of distinguishing descriptions for the first object # ie. 
string-position (left-,right-most,middle or whole)", "!= slipnet.opposite]) == 0 def __structure_versus_structure(structure1, weight1, structure2, weight2): structure1.update_strength() structure2.update_strength() weighted_strength1 =", "description.update_strength() strength = description.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet,", "value_list = [] for node in object_list: depth = node.conceptual_depth value = formulas.temperature_adjusted_value(depth)", "destination.right_bond.direction_category != direction: if destination.right_bond.direction_category: continue if not bond_facet or bond_facet == destination.right_bond.facet:", "= source.get_descriptor(bond_facet) destination_descriptor = destination.get_descriptor(bond_facet) assert source_descriptor assert destination_descriptor return source_descriptor, destination_descriptor def", ": relevance = {target_relevance}, \" f\"unhappiness = {int(target_unhappiness)}\" ) string = workspace.initial relevances", "= source search = True while search: search = False if not destination.right_bond:", "new ones to the existing corr. 
existing = correspondence.object_from_initial.correspondence for mapping in correspondence.concept_mappings:", "disable=too-many-branches # pylint: disable=too-many-statements def top_down_group_scout__category(codelet): group_category = codelet.arguments[0] category = group_category.get_related_node(slipnet.bond_category) assert", "in group with these bonds search = True destination = source while search:", "rule_translator(): assert workspace.rule if len(workspace.initial) == 1 and len(workspace.target) == 1: bond_density =", "group = leftmost coderack.propose_group( group.object_list, group.bond_list, group.group_category, group.direction_category, group.facet, codelet, ) return bonds", "direction, bond_facet, codelet ) def top_down_group_scout__direction(codelet): direction = codelet.arguments[0] source = __get_scout_source( direction,", "changed.get_descriptor(slipnet.letter_category) other_objects_of_same_letter = [ o for o in workspace.initial.objects if not o !=", "one of its type in the string object_list = [] position = changed.get_descriptor(slipnet.string_position_category)", "assert __structure_versus_structure( correspondence, 1.0, incompatible_rule, 1.0 ) for incompatible in incompatibles: incompatible.break_the_structure() #", "category, bond_facet, source_descriptor, destination_descriptor, codelet, ) def rule_scout(codelet): assert workspace.number_of_unreplaced_objects() == 0 changed_objects", "not workspace.target.equivalent_group(flipper) else: target_not_flipped = False initial_in_objects = object_from_initial in workspace.objects target_in_objects =", "slipnet.right if direction == slipnet.left: first_bond = source.left_bond else: first_bond = source.right_bond if", "[value] i = formulas.select_list_position(value_list) descriptor = object_list[i] # choose the relation (change the", "def __fight(structure, structure_weight, incompatibles, incompatible_weight): if not (incompatibles and len(incompatibles)): return True for", 
"mapping.slippability() / 100.0 probability_of_slippage = formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage): return True return False #", "strength) def bond_builder(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() assert bond.source in workspace.objects or", "slipnode) initial_unhappiness = workspace.initial.intra_string_unhappiness target_unhappiness = workspace.target.intra_string_unhappiness logging.info( f\"initial : relevance = {initial_relevance},", "def __structure_versus_structure(structure1, weight1, structure2, weight2): structure1.update_strength() structure2.update_strength() weighted_strength1 = formulas.temperature_adjusted_value( structure1.total_strength * weight1", "bottom_up_bond_scout(codelet): source = choose_unmodified_object(\"intra_string_salience\", workspace.objects) __show_which_string_object_is_from(source) destination = choose_neighbour(source) assert destination logging.info(f\"destination: {destination}\")", "category, bond_facet, source_descriptor, destination_descriptor, codelet, ) else: coderack.propose_bond( destination, source, category, bond_facet, destination_descriptor,", "bond_facet = destination.right_bond.facet direction = source.right_bond.direction_category destination = destination.right_bond.right_object search = True assert", "workspace.initial: return \"initial\" return \"other\" def __get_scout_source(slipnode, relevance_method, type_name): initial_relevance = relevance_method(workspace.initial, slipnode)", "__get_descriptors(bond_facet, source, destination): source_descriptor = source.get_descriptor(bond_facet) destination_descriptor = destination.get_descriptor(bond_facet) assert source_descriptor assert destination_descriptor", "incompatible_rule = workspace.rule assert __structure_versus_structure( correspondence, 1.0, incompatible_rule, 1.0 ) for incompatible in", "in workspace.initial.objects if isinstance(o, 
Letter)] letter_of_initial_string = random.choice(letters) logging.info(f\"selected letter in initial string", "logging.info(\"trying to break incompatible correspondences\") assert __fight(bond, 2.0, incompatible_correspondences, 3.0) for incompatible in", "if workspace.rule: if workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule = workspace.rule assert __structure_versus_structure( correspondence, 1.0, incompatible_rule, 1.0", "= relevance_method(workspace.target, slipnode) initial_unhappiness = workspace.initial.intra_string_unhappiness target_unhappiness = workspace.target.intra_string_unhappiness logging.info( f\"initial : relevance", "] letter_of_modified_string = more_letters and more_letters[0] or None assert letter_of_modified_string position -= 1", "if structure.string == workspace.initial: return \"initial\" return \"other\" def __get_scout_source(slipnode, relevance_method, type_name): initial_relevance", "already spans the string - propose this object group = leftmost coderack.propose_group( group.object_list,", "letter_of_initial_string.changed = True workspace.changed_object = letter_of_initial_string logging.info(\"building replacement\") def top_down_bond_scout__category(codelet): logging.info(\"top_down_bond_scout__category\") category =", "fight against it incompatible_rule = None if workspace.rule: if workspace.rule.incompatible_rule_correspondence(correspondence): incompatible_rule = workspace.rule", "density > 0.6: distribution = [2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0,", "] if not len(other_objects_of_same_letter): object_list += [letter] # if this object corresponds to", "in bond.string.bonds: if bond.same_neighbours(string_bond) and bond.same_categories(string_bond): if bond.direction_category: bond.direction_category.buffer = 100.0 bond.category.buffer =", "new_list = [] slippages = workspace.slippages() for node in object_list: node = node.apply_slippages(slippages)", "1.0, 1.0] elif density > 0.6: 
distribution = [2.0, 5.0, 150.0, 5.0, 2.0,", "correspondence_spans = ( correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span() ) for incompatible in incompatibles: incompatible_spans =", "in initial_description_types and __all_opposite_mappings(formulas.opposite_mappings) and slipnet.opposite.activation != 100.0 ): object_from_target = object_from_target.flipped_version() concept_mappings", "sliplinks[i] chosen_property = chosen.destination coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def top_down_description_scout(codelet): description_type", "import Correspondence from .group import Group from .letter import Letter from .replacement import", "if first_bond and not first_bond.direction_category: direction = None if not first_bond or first_bond.direction_category", "5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.4: distribution =", "source.rightmost: mydirection = slipnet.left else: activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not", "5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.6: distribution", "codelet, ) return bonds = [] objects = [leftmost] while leftmost.right_bond: bonds +=", "codelet, strength) def description_builder(codelet): description = codelet.arguments[0] assert description.object in workspace.objects if description.object.described(description.descriptor):", "first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond\") else: logging.info(f\"first_bond: {first_bond}\") if first_bond", "next_object = objekt # if incompatible bonds exist - fight group.update_strength() assert __fight_incompatibles(incompatible_bonds,", "mapping.initial_description_type.buffer = 100.0 mapping.initial_descriptor.buffer = 100.0 mapping.target_description_type.buffer = 100.0 mapping.target_descriptor.buffer = 100.0 coderack.new_codelet(\"correspondence-builder\",", "descriptor: 
{destination_descriptor.name.upper()}\") category = source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category =", "[o for o in workspace.initial.objects if o.changed] # assert len(changed_objects) < 2 #", "for o in workspace.initial.objects if not o != changed and o.get_description_type(letter) ] if", "slipnet.letter_category, descriptor, slipnet.letter, relation, codelet ) def rule_strength_tester(codelet): rule = codelet.arguments[0] rule.update_strength() probability", "\"bond\" ) destination = choose_directed_neighbor(source, direction) assert destination logging.info(f\"to object: {destination}\") bond_facet =", "target_not_flipped = not workspace.target.equivalent_group(flipper) else: target_not_flipped = False initial_in_objects = object_from_initial in workspace.objects", "and o.get_description_type(letter) ] if not len(other_objects_of_same_letter): object_list += [letter] # if this object", "bond_density = 1.0 else: number_of_bonds = len(workspace.initial.bonds) + len(workspace.target.bonds) nearly_total_length = len(workspace.initial) +", "only one of its type in the string object_list = [] position =", "if category == forward_bond: coderack.propose_bond( source, destination, category, bond_facet, source_descriptor, destination_descriptor, codelet, )", "] # use conceptual depth to choose a relation value_list = [] for", "== slipnet.identity: forward_bond = slipnet.sameness backward_bond = slipnet.sameness else: backward_bond = destination_descriptor.get_bond_category(source_descriptor) assert", "= workspace.rule assert __structure_versus_structure( correspondence, 1.0, incompatible_rule, 1.0 ) for incompatible in incompatibles:", "existing corr. 
existing = correspondence.object_from_initial.correspondence for mapping in correspondence.concept_mappings: if mapping.label: mapping.label.buffer =", "chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) descriptions = chosen_object.get_possible_descriptions(description_type) assert descriptions values", "{letter_of_initial_string}, so fizzling\" ) return position = letter_of_initial_string.left_index more_letters = [ o for", "= chosen_bond.facet bonds = possible_group_bonds(category, direction_category, bond_facet, bonds) assert bonds group_category = category.get_related_node(slipnet.group_category)", "= [ m for m in distinguishing_mappings if m.initial_description_type == slipnet.string_position_category and m.initial_description_type", "leftmost = objekt while leftmost.group and leftmost.group.bond_category == slipnet.sameness: leftmost = leftmost.group if", "list chosen_bond = random.choice(bonds) category = chosen_bond.category direction_category = chosen_bond.direction_category bond_facet = chosen_bond.facet", "slipnet.left else: mydirection = slipnet.right if mydirection == slipnet.left: first_bond = source.left_bond else:", "else: first_bond = source.right_bond if not first_bond: logging.info(\"no first_bond2\") else: logging.info(f\"first_bond2: {first_bond}\") if", "= ( correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span() ) for incompatible in incompatibles: incompatible_spans = (", "string selected: {workspace.initial} for {type_name}\") source = choose_unmodified_object(\"intra_string_salience\", string.objects) return source def __get_bond_facet(source,", "def group_builder(codelet): # update strength value of the group group = codelet.arguments[0] __show_which_string_object_is_from(group)", "if mapping.label: mapping.label.buffer = 100.0 if not mapping.is_contained_by(existing.concept_mappings): 
existing.concept_mappings += [mapping] return incompatibles", "probability = formulas.temperature_adjusted_probability(strength / 100.0) logging.info(f\"bond strength = {strength} for {bond}\") assert formulas.coin_flip(probability)", "incompatible_group, 1.0 ) # if there is an incompatible rule, fight against it", "\"target\" if structure.string == workspace.initial: return \"initial\" return \"other\" def __get_scout_source(slipnode, relevance_method, type_name):", "True for incompatible in incompatibles: if not __structure_versus_structure( structure, structure_weight, incompatible, incompatible_weight ):", "objekt.right_bond if right_bond: if right_bond.right_object == next_object: continue if right_bond.direction_category == group.direction_category: continue", "import Replacement from .slipnet import slipnet from .workspace_formulas import choose_bond_facet from .workspace_formulas import", "= destination.right_bond.right_object search = True assert destination != source objects = [source] bonds", "destination = destination.right_bond.right_object search = True assert destination != source logging.info(f\"proposing group from", "= formulas.temperature_adjusted_probability(strength / 100.0) assert random.random() <= probability # it is strong enough", "builder & activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer = 100.0 if group.direction_category: group.direction_category.buffer = 100.0 coderack.new_codelet(\"group-builder\",", "+ 1 return len(distribution) def rule_translator(): assert workspace.rule if len(workspace.initial) == 1 and", "and add new ones to the existing corr. 
existing = correspondence.object_from_initial.correspondence for mapping", "not bond_facet or bond_facet == destination.right_bond.facet: bond_facet = destination.right_bond.facet direction = source.right_bond.direction_category destination", "in mappings if m.label != slipnet.opposite]) == 0 def __structure_versus_structure(structure1, weight1, structure2, weight2):", "incompatibles: correspondence_spans = ( correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span() ) for incompatible in incompatibles: incompatible_spans", "assert workspace.rule if len(workspace.initial) == 1 and len(workspace.target) == 1: bond_density = 1.0", "distribution = [2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif", "target_in_objects and (not (want_flip and target_not_flipped)) ) if correspondence.reflexive(): # if the correspondence", "bond-builder\") coderack.new_codelet(\"bond-builder\", codelet, strength) def bond_builder(codelet): bond = codelet.arguments[0] __show_which_string_object_is_from(bond) bond.update_strength() assert bond.source", "left_bond.direction_category == group.direction_category: continue incompatible_bonds += [left_bond] previous = objekt next_object = group.object_list[-1]", "rules if workspace.rule: assert __structure_versus_structure(rule, 1.0, workspace.rule, 1.0) workspace.build_rule(rule) def __get_cut_off(density): if density", "assert chosen_object __show_which_string_object_is_from(chosen_object) description = formulas.choose_relevant_description_by_activation(chosen_object) assert description sliplinks = formulas.similar_property_links(description.descriptor) assert sliplinks", "# if it is the only one of its type in the string", "or first_bond.category != category: # check the other side of object if direction", "+ len(workspace.target) - 2 bond_density = number_of_bonds / nearly_total_length if bond_density > 1.0:", ") if correspondence.reflexive(): # if the correspondence exists, activate concept 
mappings # and", "1 initial_ascii = ord(workspace.initial_string[position]) modified_ascii = ord(workspace.modified_string[position]) diff = initial_ascii - modified_ascii if", "strings, check to see if the # string description needs to be flipped", "source.leftmost: mydirection = slipnet.right elif source.rightmost: mydirection = slipnet.left else: activations = [slipnet.left.activation]", "destination = object1 category = group.group_category.get_related_node(slipnet.bond_category) facet = group.facet new_bond = Bond( source,", "noinspection PyStringFormat def group_scout__whole_string(codelet): string = workspace.initial if random.random() > 0.5: string =", "if target_object.described(node): if target_object.distinguishing_descriptor(node): new_list += [node] object_list = new_list # should this", "[slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): direction = slipnet.left else: direction =", "= initial_unhappiness + target_unhappiness randomized = random.random() * (relevances + unhappinesses) initials =", "source objects = [source] bonds = [] while source != destination: bonds +=", "= [source] bonds = [] while source != destination: bonds += [source.right_bond] objects", "for o in workspace.initial.objects if o.changed] # assert len(changed_objects) < 2 # if", "if m.label != slipnet.opposite]) == 0 def __structure_versus_structure(structure1, weight1, structure2, weight2): structure1.update_strength() structure2.update_strength()", "def bottom_up_bond_scout(codelet): source = choose_unmodified_object(\"intra_string_salience\", workspace.objects) __show_which_string_object_is_from(source) destination = choose_neighbour(source) assert destination logging.info(f\"destination:", "assert destination != source logging.info(f\"proposing group from {source} to {destination}\") objects = [source]", "assert distinguishing_mappings # if both objects span the strings, check to see if", "mydirection = 
slipnet.left else: mydirection = slipnet.right if mydirection == slipnet.left: first_bond =", "span the strings, check to see if the # string description needs to", "!= direction: if mydirection == slipnet.right: first_bond = source.left_bond else: first_bond = source.right_bond", "( correspondence.object_from_initial.letter_span() + correspondence.object_from_target.letter_span() ) for incompatible in incompatibles: incompatible_spans = ( incompatible.object_from_initial.letter_span()", "formulas.coin_flip(probability_of_fizzle) # choose a structure at random structures = [ s for s", "group and bond if they exist if incompatible_bond: incompatible_bond.break_the_structure() if incompatible_group: incompatible_group.break_the_structure() if", "import workspace from .workspace_object import WorkspaceObject # some methods common to the codelets", "if destination.right_bond.category != category: continue if destination.right_bond.direction_category != direction: if destination.right_bond.direction_category: continue if", "[ s for s in workspace.structures if isinstance(s, (Group, Bond, Correspondence)) ] assert", "random letter in initial string letters = [o for o in workspace.initial.objects if", "if bond_density > 1.0: bond_density = 1.0 cutoff = __get_cut_off(bond_density) * 10.0 assert", "i = formulas.select_list_position(value_list) relation = object_list[i] coderack.propose_rule( slipnet.letter_category, descriptor, slipnet.letter, relation, codelet )", "?? 
assert object_list # use conceptual depth to choose a description value_list =", "leftmost object in group with these bonds search = True while search: search", "[1.0, 1.0, 2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0] else: distribution =", "structure.destination.group: break_objects += [structure.source.group] # try to break all objects for structure in", "= None incompatible_group = None # if there is an incompatible bond then", "None, codelet) changed = changed_objects[-1] # generate a list of distinguishing descriptions for", "+= [leftmost.right_bond] leftmost = leftmost.right_bond.right_object objects += [leftmost] assert leftmost.rightmost # choose a", "__structure_versus_structure(structure1, weight1, structure2, weight2): structure1.update_strength() structure2.update_strength() weighted_strength1 = formulas.temperature_adjusted_value( structure1.total_strength * weight1 )", "incompatible in incompatible_bonds: incompatible.break_the_structure() # create new bonds group.bond_list = [] for i", "bond.category.buffer = 100.0 logging.info(\"already exists: activate descriptors & Fizzle\") return incompatible_bonds = bond.get_incompatible_bonds()", "if changed.replacement.relation: object_list += [changed.replacement.relation] object_list += [ changed.replacement.object_from_modified.get_descriptor(slipnet.letter_category) ] # use conceptual", "= node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) descriptor =", "# it is strong enough - post builder & activate nodes group.group_category.get_related_node(slipnet.bond_category).buffer =", "an incompatible rule, fight against it incompatible_rule = None if workspace.rule: if workspace.rule.incompatible_rule_correspondence(correspondence):", "= None # find leftmost object in group with these bonds while search:", "incompatible bond then fight against it initial = correspondence.object_from_initial target = 
correspondence.object_from_target if", "diff = initial_ascii - modified_ascii if abs(diff) < 2: relations = {0: slipnet.sameness,", "relations[diff] logging.info(f\"Relation found: {relation.name}\") else: relation = None logging.info(\"no relation found\") letter_of_initial_string.replacement =", ") destination = choose_directed_neighbor(source, direction) assert destination logging.info(f\"to object: {destination}\") bond_facet = __get_bond_facet(source,", "formulas.temperature_adjusted_probability(strength / 100.0) assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet, strength) def description_builder(codelet): description = codelet.arguments[0]", "are distinguishing distinguishing_mappings = [m for m in concept_mappings if m.distinguishing()] assert distinguishing_mappings", "+= [mapping] return incompatibles = correspondence.get_incompatible_correspondences() # fight against all correspondences if incompatibles:", "source_descriptor, destination_descriptor = __get_descriptors( bond_facet, source, destination ) forward_bond = source_descriptor.get_bond_category(destination_descriptor) if forward_bond", "cutoff = __get_cut_off(bond_density) * 10.0 assert cutoff >= formulas.actual_temperature if workspace.rule.build_translated_rule(): workspace.found_answer =", "(left-,right-most,middle or whole) or letter category # if it is the only one", "incompatible_weight): logging.info(f\"broke the {name}\") return True logging.info(f\"failed to break {name}: Fizzle\") return False", "concept mappings concept_mappings = formulas.get_mappings( object_from_initial, object_from_target, object_from_initial.relevant_descriptions(), object_from_target.relevant_descriptions(), ) assert concept_mappings assert", "the strings, check to see if the # string description needs to be", "100.0 mapping.initial_descriptor.buffer = 100.0 mapping.target_description_type.buffer = 100.0 mapping.target_descriptor.buffer = 100.0 
coderack.new_codelet(\"correspondence-builder\", codelet, strength,", "in workspace.objects # check to see if bonds are there of the same", "source.left_bond.left_object search = True destination = source search = True while search: search", "bond_facet, source, destination ) forward_bond = source_descriptor.get_bond_category(destination_descriptor) if forward_bond == slipnet.identity: forward_bond =", "assert object_from_initial in workspace.objects assert ( object_from_target in workspace.objects or correspondence.flip_target_object and not", "bond facet: {bond_facet.get_name()}\") logging.info(f\"Source: {source}, destination: {destination}\") bond_descriptors = __get_descriptors(bond_facet, source, destination) source_descriptor,", "return True for incompatible in incompatibles: if not __structure_versus_structure( structure, structure_weight, incompatible, incompatible_weight", "= destination.right_bond.facet direction = source.right_bond.direction_category destination = destination.right_bond.right_object search = True assert destination", "> 0.5: string = workspace.target logging.info(f\"target string selected: {workspace.target}\") else: logging.info(f\"initial string selected:", "# object_list = the union of this and the distingushing descriptors if changed.correspondence:", "incompatible_rule, 1.0 ) for incompatible in incompatibles: incompatible.break_the_structure() # break incompatible group and", "# find rightmost object in group with these bonds search = True destination", "group.direction_category: continue incompatible_bonds += [left_bond] previous = objekt next_object = group.object_list[-1] for objekt", "group.direction_category.buffer = 100.0 coderack.new_codelet(\"group-builder\", codelet, strength) def group_builder(codelet): # update strength value of", "formulas.actual_temperature if workspace.rule.build_translated_rule(): workspace.found_answer = True else: temperature.clamp_time = coderack.codelets_run + 100 temperature.clamped", 
"activations = [slipnet.left.activation] activations += [slipnet.right.activation] if not formulas.select_list_position(activations): direction = slipnet.left else:", "chosen_object = choose_unmodified_object(\"total_salience\", workspace.objects) assert chosen_object __show_which_string_object_is_from(chosen_object) description = formulas.choose_relevant_description_by_activation(chosen_object) assert description sliplinks", "{destination}\") bond_facet = __get_bond_facet(source, destination) logging.info(f\"chosen bond facet: {bond_facet.get_name()}\") logging.info(f\"Source: {source}, destination: {destination}\")", "description in objekt.relevant_descriptions(): if description.descriptor == initial_descriptor: target_candidates += [objekt] assert target_candidates object_from_target", "sliplink in sliplinks ] i = formulas.select_list_position(values) chosen = sliplinks[i] chosen_property = chosen.destination", "first_bond and not first_bond.direction_category: direction = None if not first_bond or first_bond.direction_category !=", "structure1.total_strength * weight1 ) weighted_strength2 = formulas.temperature_adjusted_value( structure2.total_strength * weight2 ) rhs =", "return i + 1 return len(distribution) def rule_translator(): assert workspace.rule if len(workspace.initial) ==", "1.0, 1.0) for incompatible in incompatible_bonds: incompatible.break_the_structure() # create new bonds group.bond_list =", "= False if not source.left_bond: continue if source.left_bond.category != category: continue if source.left_bond.direction_category", "= [] objects = [leftmost] while leftmost.right_bond: bonds += [leftmost.right_bond] leftmost = leftmost.right_bond.right_object", "[2.0, 5.0, 150.0, 5.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0] elif density >", "of this and the distingushing descriptors if changed.correspondence: target_object = changed.correspondence.object_from_target new_list =", "source.left_bond.facet: bond_facet = source.left_bond.facet direction = 
source.left_bond.direction_category source = source.left_bond.left_object search = True", "= random.random() * (relevances + unhappinesses) initials = initial_relevance + initial_unhappiness if randomized", "group with these bonds while search: search = False if not source.left_bond: continue", "depth = node.conceptual_depth value = formulas.temperature_adjusted_value(depth) value_list += [value] i = formulas.select_list_position(value_list) descriptor", ") def important_object_correspondence_scout(codelet): object_from_initial = choose_unmodified_object( \"relative_importance\", workspace.initial.objects ) descriptors = object_from_initial.relevant_distinguishing_descriptors() slipnode", "probability = group.single_letter_group_probability() assert random.random() >= probability coderack.propose_single_letter_group(source, codelet) return direction = first_bond.direction_category", "1] object2 = group.object_list[i] if not object1.right_bond: if group.direction_category == slipnet.right: source =", "if isinstance(s, (Group, Bond, Correspondence)) ] assert structures structure = random.choice(structures) __show_which_string_object_is_from(structure) break_objects", "workspace # object_list = the union of this and the distingushing descriptors if", "100.0) assert random.random() <= probability # it is strong enough - post builder", "2.0, 1.0, 1.0, 1.0, 1.0] elif density > 0.2: distribution = [1.0, 1.0,", "+= [objekt] assert target_candidates object_from_target = choose_unmodified_object( \"inter_string_salience\", target_candidates ) assert object_from_initial.spans_string() ==", "bond_facet, source_descriptor, destination_descriptor, codelet, ) else: coderack.propose_bond( destination, source, category, bond_facet, destination_descriptor, source_descriptor,", "1.0, 1.0, 1.0] else: distribution = [1.0, 1.0, 1.0, 2.0, 5.0, 150.0, 5.0,", "replacement_finder(): # choose random letter in initial string letters = [o for o", "correspondence.object_from_initial 
object_from_target = correspondence.object_from_target assert object_from_initial in workspace.objects assert ( object_from_target in workspace.objects", "100.0 probability_of_slippage = formulas.temperature_adjusted_probability(slippiness) if formulas.coin_flip(probability_of_slippage): return True return False # start the", "a description value_list = [] for node in object_list: depth = node.conceptual_depth value", "incompatible.break_the_structure() for incompatible in incompatible_groups: incompatible.break_the_structure() for incompatible in incompatible_correspondences: incompatible.break_the_structure() logging.info(f\"building bond", "] assert structures structure = random.choice(structures) __show_which_string_object_is_from(structure) break_objects = [structure] if isinstance(structure, Bond):", "= workspace.target logging.info(f\"target string selected: {workspace.target}\") else: logging.info(f\"initial string selected: {workspace.initial}\") # find", "codelet, strength) def group_builder(codelet): # update strength value of the group group =", "return bond_facet def __get_descriptors(bond_facet, source, destination): source_descriptor = source.get_descriptor(bond_facet) destination_descriptor = destination.get_descriptor(bond_facet) assert", "strength = description.total_strength probability = formulas.temperature_adjusted_probability(strength / 100.0) assert formulas.coin_flip(probability) coderack.new_codelet(\"description-builder\", codelet, strength)", "= choose_unmodified_object(\"intra_string_salience\", workspace.objects) __show_which_string_object_is_from(source) destination = choose_neighbour(source) assert destination logging.info(f\"destination: {destination}\") bond_facet =", "disable=too-many-statements def top_down_group_scout__category(codelet): group_category = codelet.arguments[0] category = group_category.get_related_node(slipnet.bond_category) assert category source =", "changed.correspondence.object_from_target new_list = [] 
slippages = workspace.slippages() for node in object_list: node =", "chosen.destination coderack.propose_description( chosen_object, chosen_property.category(), chosen_property, codelet ) def top_down_description_scout(codelet): description_type = codelet.arguments[0] chosen_object", ") category = source_descriptor.get_bond_category(destination_descriptor) assert category if category == slipnet.identity: category = slipnet.sameness", "string = workspace.target logging.info(f\"target string selected: {workspace.target}\") else: logging.info(f\"initial string selected: {workspace.initial}\") #", "codelet.arguments[0] rule.update_strength() probability = formulas.temperature_adjusted_probability(rule.total_strength / 100.0) assert random.random() <= probability coderack.new_codelet(\"rule-builder\", codelet,", "codelet ) # noinspection PyStringFormat def group_scout__whole_string(codelet): string = workspace.initial if random.random() >", "coderack.codelets_run + 100 temperature.clamped = True formulas.Temperature = 100.0 def bottom_up_correspondence_scout(codelet): object_from_initial =", "formulas.select_list_position(activations): direction = slipnet.left else: direction = slipnet.right if direction == slipnet.left: first_bond", "relation (change the letmost object to \"successor\" or \"d\" object_list = [] if", "0 def __structure_versus_structure(structure1, weight1, structure2, weight2): structure1.update_strength() structure2.update_strength() weighted_strength1 = formulas.temperature_adjusted_value( structure1.total_strength *", "for sliplink in sliplinks ] i = formulas.select_list_position(values) chosen = sliplinks[i] chosen_property =" ]
[ "# tune y_from, y_to, x_from, x_to = \\ list(map(functools.partial(np.maximum, 0), [y_from, y_to, x_from,", "y_diff y_to = y + h + y_diff x_from = x - x_diff", "# percentage y_diff = math.ceil(h * offset / 100) x_diff = math.ceil(w *", "= (bbox_cluster == cluster_index).astype(np.float32) try: horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0] vertical_indicies = np.where(np.any(char_pixel, axis=1))[0]", "= y + h + y_diff x_from = x - x_diff x_to =", "= kuzu_cls.load_image(char_img) pred_label = kuzu_cls.predict(char_img) # print(pred_label) char_draw.text( (x + w + FONT_SIZE", "make_contours(pred_bbox) # get all center points by contour method center_coords = get_centers(pred_center.astype(np.uint8)) no_center_points", "= char_img.convert(\"RGB\") char_img = np.asarray(char_img) final_bbox = cv2.resize(final_bbox, (origin_w, origin_h)) final_center = cv2.resize(final_center,", "import math import time import functools import random from tqdm import tqdm import", "= 5 # percentage y_diff = math.ceil(h * offset / 100) x_diff =", "= x + w + x_diff # tune y_from, y_to, x_from, x_to =", "+ x_diff # tune y_from, y_to, x_from, x_to = \\ list(map(functools.partial(np.maximum, 0), [y_from,", "y_from, y_to, x_from, x_to = \\ list(map(functools.partial(np.maximum, 0), [y_from, y_to, x_from, x_to])) try:", "pred_label, fill=(0, 0, 255, 255), font=font ) except Exception as e: print(e) continue", "= os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon = True kuzu_seg = KuzuSegment() kuzu_cls = KuzuClassify()", "rcParams['figure.figsize'] = 20, 20 # noqa from consts import FONT_SIZE from utils import", "try: horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0] vertical_indicies = np.where(np.any(char_pixel, axis=1))[0] x_min, x_max = horizontal_indicies[[0,", "# set offset to crop character offset = 5 # percentage y_diff =", "= vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox = vis_pred_bbox(pred_bbox, 
center_coords, width=2) y_ratio = origin_h / 512", "= kuzu_seg.predict(img) # get all polygon area in image polygon_contours = make_contours(pred_bbox) #", "final_center = vis_pred_center(center_coords, rad=2) # filter polygon if filter_polygon: filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords)", "y_diff = math.ceil(h * offset / 100) x_diff = math.ceil(w * offset /", "0: bbox_cluster = get_labels(center_coords, pred_bbox) # ignore background hex color (=0) for cluster_index", "os import math import time import functools import random from tqdm import tqdm", "* x_ratio) y = int(y * y_ratio) h = int(h * y_ratio) #", "(bbox_cluster == cluster_index).astype(np.float32) try: horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0] vertical_indicies = np.where(np.any(char_pixel, axis=1))[0] x_min,", "= \\ list(map(functools.partial(np.maximum, 0), [y_from, y_to, x_from, x_to])) try: char_img = origin_image[y_from:y_to, x_from:x_to]", "* y_ratio) h = int(h * y_ratio) # set offset to crop character", "x_min y = y_min w = x_max - x_min h = y_max -", "import FONT_SIZE from utils import ( make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center,", "( make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font ) from grpc_utils import", "vertical_indicies[[0, -1]] except IndexError: continue x = x_min y = y_min w =", "except IndexError: continue x = x_min y = y_min w = x_max -", "y_to, x_from, x_to = \\ list(map(functools.partial(np.maximum, 0), [y_from, y_to, x_from, x_to])) try: char_img", "512 pil_img = Image.fromarray(origin_image).convert('RGBA') char_canvas = Image.new('RGBA', pil_img.size) char_draw = ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points))", "char_img = char_img.convert(\"RGB\") char_img = np.asarray(char_img) final_bbox = cv2.resize(final_bbox, 
(origin_w, origin_h)) final_center =", "y_min, y_max = vertical_indicies[[0, -1]] except IndexError: continue x = x_min y =", "h = y_max - y_min # convert to original coordinates x = int(x", "set offset to crop character offset = 5 # percentage y_diff = math.ceil(h", "color (=0) for cluster_index in tqdm(range(len(center_coords))[1:]): char_pixel = (bbox_cluster == cluster_index).astype(np.float32) try: horizontal_indicies", "pil_img.size) char_draw = ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points)) if no_center_points > 0: bbox_cluster = get_labels(center_coords,", "= origin_image[y_from:y_to, x_from:x_to] char_img = kuzu_cls.load_image(char_img) pred_label = kuzu_cls.predict(char_img) # print(pred_label) char_draw.text( (x", "* offset / 100) # expand area y_from = y - y_diff y_to", "if no_center_points > 0: bbox_cluster = get_labels(center_coords, pred_bbox) # ignore background hex color", "final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2) y_ratio = origin_h / 512 x_ratio = origin_w", "= int(w * x_ratio) y = int(y * y_ratio) h = int(h *", "kuzu_cls.load_image(char_img) pred_label = kuzu_cls.predict(char_img) # print(pred_label) char_draw.text( (x + w + FONT_SIZE /", "x_from = x - x_diff x_to = x + w + x_diff #", "== '__main__': img_dir = \"./images\" img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon = True", "= 20, 20 # noqa from consts import FONT_SIZE from utils import (", "KuzuClassify ) if __name__ == '__main__': img_dir = \"./images\" img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir)))", "> 0: bbox_cluster = get_labels(center_coords, pred_bbox) # ignore background hex color (=0) for", "{}\".format(no_center_points)) if no_center_points > 0: bbox_cluster = get_labels(center_coords, pred_bbox) # ignore background hex", "to crop character offset = 5 # percentage y_diff = math.ceil(h * offset", "100) x_diff = math.ceil(w * offset / 100) # expand area y_from =", "= 
int(h * y_ratio) # set offset to crop character offset = 5", "x_from, x_to = \\ list(map(functools.partial(np.maximum, 0), [y_from, y_to, x_from, x_to])) try: char_img =", "math import time import functools import random from tqdm import tqdm import cv2", "= ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points)) if no_center_points > 0: bbox_cluster = get_labels(center_coords, pred_bbox) #", "for cluster_index in tqdm(range(len(center_coords))[1:]): char_pixel = (bbox_cluster == cluster_index).astype(np.float32) try: horizontal_indicies = np.where(np.any(char_pixel,", "- x_min h = y_max - y_min # convert to original coordinates x", ") from grpc_utils import ( KuzuSegment, KuzuClassify ) if __name__ == '__main__': img_dir", "= filter_polygons_points_intersection(polygon_contours, center_coords) # noqa pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox = vis_pred_bbox(pred_bbox, center_coords,", "original coordinates x = int(x * x_ratio) w = int(w * x_ratio) y", "no_center_points = len(center_coords) final_center = vis_pred_center(center_coords, rad=2) # filter polygon if filter_polygon: filtered_contours", "functools import random from tqdm import tqdm import cv2 import numpy as np", "import numpy as np import matplotlib.pyplot as plt from PIL import Image, ImageDraw", "Exception as e: print(e) continue char_img = Image.alpha_composite(pil_img, char_canvas) char_img = char_img.convert(\"RGB\") char_img", "np.where(np.any(char_pixel, axis=0))[0] vertical_indicies = np.where(np.any(char_pixel, axis=1))[0] x_min, x_max = horizontal_indicies[[0, -1]] y_min, y_max", "/ 4, y + h / 2 - FONT_SIZE), pred_label, fill=(0, 0, 255,", "0), [y_from, y_to, x_from, x_to])) try: char_img = origin_image[y_from:y_to, x_from:x_to] char_img = kuzu_cls.load_image(char_img)", "origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp) pred_bbox, pred_center = kuzu_seg.predict(img) # get all polygon", "by contour method center_coords = 
get_centers(pred_center.astype(np.uint8)) no_center_points = len(center_coords) final_center = vis_pred_center(center_coords, rad=2)", "vertical_indicies = np.where(np.any(char_pixel, axis=1))[0] x_min, x_max = horizontal_indicies[[0, -1]] y_min, y_max = vertical_indicies[[0,", "Image.alpha_composite(pil_img, char_canvas) char_img = char_img.convert(\"RGB\") char_img = np.asarray(char_img) final_bbox = cv2.resize(final_bbox, (origin_w, origin_h))", "cluster_index).astype(np.float32) try: horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0] vertical_indicies = np.where(np.any(char_pixel, axis=1))[0] x_min, x_max =", "time import functools import random from tqdm import tqdm import cv2 import numpy", "cv2 import numpy as np import matplotlib.pyplot as plt from PIL import Image,", "= origin_w / 512 pil_img = Image.fromarray(origin_image).convert('RGBA') char_canvas = Image.new('RGBA', pil_img.size) char_draw =", "+ h / 2 - FONT_SIZE), pred_label, fill=(0, 0, 255, 255), font=font )", "import random from tqdm import tqdm import cv2 import numpy as np import", "char_img = origin_image[y_from:y_to, x_from:x_to] char_img = kuzu_cls.load_image(char_img) pred_label = kuzu_cls.predict(char_img) # print(pred_label) char_draw.text(", "- FONT_SIZE), pred_label, fill=(0, 0, 255, 255), font=font ) except Exception as e:", "* x_ratio) w = int(w * x_ratio) y = int(y * y_ratio) h", "vis_pred_center(center_coords, rad=2) # filter polygon if filter_polygon: filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa", "filter polygon if filter_polygon: filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa pred_bbox = vis_pred_bbox_polygon(pred_bbox,", "y_min w = x_max - x_min h = y_max - y_min # convert", "filter_polygon: filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox =", "in 
image polygon_contours = make_contours(pred_bbox) # get all center points by contour method", "random from tqdm import tqdm import cv2 import numpy as np import matplotlib.pyplot", "FONT_SIZE / 4, y + h / 2 - FONT_SIZE), pred_label, fill=(0, 0,", "= vertical_indicies[[0, -1]] except IndexError: continue x = x_min y = y_min w", "matplotlib.pyplot as plt from PIL import Image, ImageDraw from pylab import rcParams rcParams['figure.figsize']", "char_img = kuzu_cls.load_image(char_img) pred_label = kuzu_cls.predict(char_img) # print(pred_label) char_draw.text( (x + w +", "import Image, ImageDraw from pylab import rcParams rcParams['figure.figsize'] = 20, 20 # noqa", "w + FONT_SIZE / 4, y + h / 2 - FONT_SIZE), pred_label,", "kuzu_seg.load_image(img_fp) pred_bbox, pred_center = kuzu_seg.predict(img) # get all polygon area in image polygon_contours", "tqdm import tqdm import cv2 import numpy as np import matplotlib.pyplot as plt", "+ h + y_diff x_from = x - x_diff x_to = x +", "image polygon_contours = make_contours(pred_bbox) # get all center points by contour method center_coords", "h = int(h * y_ratio) # set offset to crop character offset =", "e: print(e) continue char_img = Image.alpha_composite(pil_img, char_canvas) char_img = char_img.convert(\"RGB\") char_img = np.asarray(char_img)", "= vis_pred_bbox(pred_bbox, center_coords, width=2) y_ratio = origin_h / 512 x_ratio = origin_w /", "= kuzu_cls.predict(char_img) # print(pred_label) char_draw.text( (x + w + FONT_SIZE / 4, y", "x_diff = math.ceil(w * offset / 100) # expand area y_from = y", "= True kuzu_seg = KuzuSegment() kuzu_cls = KuzuClassify() img, origin_image, origin_h, origin_w =", "rad=2) # filter polygon if filter_polygon: filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa pred_bbox", "import os import math import time import functools import random from tqdm import", "char_draw = ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points)) if 
no_center_points > 0: bbox_cluster = get_labels(center_coords, pred_bbox)", "= int(x * x_ratio) w = int(w * x_ratio) y = int(y *", "Image.new('RGBA', pil_img.size) char_draw = ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points)) if no_center_points > 0: bbox_cluster =", "offset = 5 # percentage y_diff = math.ceil(h * offset / 100) x_diff", "as plt from PIL import Image, ImageDraw from pylab import rcParams rcParams['figure.figsize'] =", "20, 20 # noqa from consts import FONT_SIZE from utils import ( make_contours,", "FONT_SIZE from utils import ( make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font", "make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font ) from grpc_utils import (", "from pylab import rcParams rcParams['figure.figsize'] = 20, 20 # noqa from consts import", "horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0] vertical_indicies = np.where(np.any(char_pixel, axis=1))[0] x_min, x_max = horizontal_indicies[[0, -1]]", "pil_img = Image.fromarray(origin_image).convert('RGBA') char_canvas = Image.new('RGBA', pil_img.size) char_draw = ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points)) if", "kuzu_cls.predict(char_img) # print(pred_label) char_draw.text( (x + w + FONT_SIZE / 4, y +", "print(e) continue char_img = Image.alpha_composite(pil_img, char_canvas) char_img = char_img.convert(\"RGB\") char_img = np.asarray(char_img) final_bbox", "-1]] y_min, y_max = vertical_indicies[[0, -1]] except IndexError: continue x = x_min y", "512 x_ratio = origin_w / 512 pil_img = Image.fromarray(origin_image).convert('RGBA') char_canvas = Image.new('RGBA', pil_img.size)", "/ 100) # expand area y_from = y - y_diff y_to = y", "axis=1))[0] x_min, x_max = horizontal_indicies[[0, -1]] y_min, y_max = vertical_indicies[[0, -1]] except IndexError:", "continue char_img = 
Image.alpha_composite(pil_img, char_canvas) char_img = char_img.convert(\"RGB\") char_img = np.asarray(char_img) final_bbox =", "255), font=font ) except Exception as e: print(e) continue char_img = Image.alpha_composite(pil_img, char_canvas)", "pred_center = kuzu_seg.predict(img) # get all polygon area in image polygon_contours = make_contours(pred_bbox)", "= horizontal_indicies[[0, -1]] y_min, y_max = vertical_indicies[[0, -1]] except IndexError: continue x =", "x_to = \\ list(map(functools.partial(np.maximum, 0), [y_from, y_to, x_from, x_to])) try: char_img = origin_image[y_from:y_to,", "rcParams rcParams['figure.figsize'] = 20, 20 # noqa from consts import FONT_SIZE from utils", "ImageDraw from pylab import rcParams rcParams['figure.figsize'] = 20, 20 # noqa from consts", "kuzu_seg = KuzuSegment() kuzu_cls = KuzuClassify() img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp) pred_bbox,", "4, y + h / 2 - FONT_SIZE), pred_label, fill=(0, 0, 255, 255),", "filter_polygon = True kuzu_seg = KuzuSegment() kuzu_cls = KuzuClassify() img, origin_image, origin_h, origin_w", "0, 255, 255), font=font ) except Exception as e: print(e) continue char_img =", "hex color (=0) for cluster_index in tqdm(range(len(center_coords))[1:]): char_pixel = (bbox_cluster == cluster_index).astype(np.float32) try:", "convert to original coordinates x = int(x * x_ratio) w = int(w *", "- y_diff y_to = y + h + y_diff x_from = x -", "x_ratio) y = int(y * y_ratio) h = int(h * y_ratio) # set", "utils import ( make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font ) from", "+ y_diff x_from = x - x_diff x_to = x + w +", "y_from = y - y_diff y_to = y + h + y_diff x_from", "int(y * y_ratio) h = int(h * y_ratio) # set offset to crop", "char_pixel = (bbox_cluster == cluster_index).astype(np.float32) try: horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0] vertical_indicies = 
np.where(np.any(char_pixel,", "= np.asarray(char_img) final_bbox = cv2.resize(final_bbox, (origin_w, origin_h)) final_center = cv2.resize(final_center, (origin_w, origin_h)) plt.imshow(char_img)", "char_draw.text( (x + w + FONT_SIZE / 4, y + h / 2", "x_to = x + w + x_diff # tune y_from, y_to, x_from, x_to", "math.ceil(h * offset / 100) x_diff = math.ceil(w * offset / 100) #", "\"./images\" img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon = True kuzu_seg = KuzuSegment() kuzu_cls", "no_center_points > 0: bbox_cluster = get_labels(center_coords, pred_bbox) # ignore background hex color (=0)", "percentage y_diff = math.ceil(h * offset / 100) x_diff = math.ceil(w * offset", "= get_labels(center_coords, pred_bbox) # ignore background hex color (=0) for cluster_index in tqdm(range(len(center_coords))[1:]):", "\\ list(map(functools.partial(np.maximum, 0), [y_from, y_to, x_from, x_to])) try: char_img = origin_image[y_from:y_to, x_from:x_to] char_img", "center_coords) # noqa pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2) y_ratio", "= kuzu_seg.load_image(img_fp) pred_bbox, pred_center = kuzu_seg.predict(img) # get all polygon area in image", "= make_contours(pred_bbox) # get all center points by contour method center_coords = get_centers(pred_center.astype(np.uint8))", "- y_min # convert to original coordinates x = int(x * x_ratio) w", "+ w + FONT_SIZE / 4, y + h / 2 - FONT_SIZE),", "y_ratio) # set offset to crop character offset = 5 # percentage y_diff", "polygon_contours = make_contours(pred_bbox) # get all center points by contour method center_coords =", "offset to crop character offset = 5 # percentage y_diff = math.ceil(h *", "y_ratio = origin_h / 512 x_ratio = origin_w / 512 pil_img = Image.fromarray(origin_image).convert('RGBA')", "= KuzuClassify() img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp) pred_bbox, pred_center = 
kuzu_seg.predict(img) #", "= y_min w = x_max - x_min h = y_max - y_min #", "from tqdm import tqdm import cv2 import numpy as np import matplotlib.pyplot as", "= x_min y = y_min w = x_max - x_min h = y_max", "pred_bbox) # ignore background hex color (=0) for cluster_index in tqdm(range(len(center_coords))[1:]): char_pixel =", "y_ratio) h = int(h * y_ratio) # set offset to crop character offset", "list(map(functools.partial(np.maximum, 0), [y_from, y_to, x_from, x_to])) try: char_img = origin_image[y_from:y_to, x_from:x_to] char_img =", ") except Exception as e: print(e) continue char_img = Image.alpha_composite(pil_img, char_canvas) char_img =", "y_diff x_from = x - x_diff x_to = x + w + x_diff", "offset / 100) # expand area y_from = y - y_diff y_to =", "method center_coords = get_centers(pred_center.astype(np.uint8)) no_center_points = len(center_coords) final_center = vis_pred_center(center_coords, rad=2) # filter", "origin_w / 512 pil_img = Image.fromarray(origin_image).convert('RGBA') char_canvas = Image.new('RGBA', pil_img.size) char_draw = ImageDraw.Draw(char_canvas)", "int(w * x_ratio) y = int(y * y_ratio) h = int(h * y_ratio)", "print(img_fp) filter_polygon = True kuzu_seg = KuzuSegment() kuzu_cls = KuzuClassify() img, origin_image, origin_h,", "cv2.resize(final_bbox, (origin_w, origin_h)) final_center = cv2.resize(final_center, (origin_w, origin_h)) plt.imshow(char_img) plt.imshow(final_bbox, cmap=\"jet\", alpha=0.50) plt.savefig(\"./assets/{}.jpg\".format(time.time()),", "as e: print(e) continue char_img = Image.alpha_composite(pil_img, char_canvas) char_img = char_img.convert(\"RGB\") char_img =", "noqa pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2) y_ratio = origin_h", "5 # percentage y_diff = math.ceil(h * offset / 100) x_diff = math.ceil(w", "get all center points by contour method center_coords = get_centers(pred_center.astype(np.uint8)) no_center_points = 
len(center_coords)", "y + h + y_diff x_from = x - x_diff x_to = x", "= vis_pred_center(center_coords, rad=2) # filter polygon if filter_polygon: filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) #", "PIL import Image, ImageDraw from pylab import rcParams rcParams['figure.figsize'] = 20, 20 #", "y = y_min w = x_max - x_min h = y_max - y_min", "tune y_from, y_to, x_from, x_to = \\ list(map(functools.partial(np.maximum, 0), [y_from, y_to, x_from, x_to]))", "w = int(w * x_ratio) y = int(y * y_ratio) h = int(h", "get_centers(pred_center.astype(np.uint8)) no_center_points = len(center_coords) final_center = vis_pred_center(center_coords, rad=2) # filter polygon if filter_polygon:", "offset / 100) x_diff = math.ceil(w * offset / 100) # expand area", "y_min # convert to original coordinates x = int(x * x_ratio) w =", "KuzuClassify() img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp) pred_bbox, pred_center = kuzu_seg.predict(img) # get", "* y_ratio) # set offset to crop character offset = 5 # percentage", "consts import FONT_SIZE from utils import ( make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon,", "w = x_max - x_min h = y_max - y_min # convert to", "math.ceil(w * offset / 100) # expand area y_from = y - y_diff", "= math.ceil(w * offset / 100) # expand area y_from = y -", "origin_image[y_from:y_to, x_from:x_to] char_img = kuzu_cls.load_image(char_img) pred_label = kuzu_cls.predict(char_img) # print(pred_label) char_draw.text( (x +", "os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon = True kuzu_seg = KuzuSegment() kuzu_cls = KuzuClassify() img,", "True kuzu_seg = KuzuSegment() kuzu_cls = KuzuClassify() img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp)", "except Exception as e: print(e) continue char_img = Image.alpha_composite(pil_img, char_canvas) char_img = char_img.convert(\"RGB\")", 
"char_img.convert(\"RGB\") char_img = np.asarray(char_img) final_bbox = cv2.resize(final_bbox, (origin_w, origin_h)) final_center = cv2.resize(final_center, (origin_w,", "as np import matplotlib.pyplot as plt from PIL import Image, ImageDraw from pylab", "grpc_utils import ( KuzuSegment, KuzuClassify ) if __name__ == '__main__': img_dir = \"./images\"", "= get_centers(pred_center.astype(np.uint8)) no_center_points = len(center_coords) final_center = vis_pred_center(center_coords, rad=2) # filter polygon if", "axis=0))[0] vertical_indicies = np.where(np.any(char_pixel, axis=1))[0] x_min, x_max = horizontal_indicies[[0, -1]] y_min, y_max =", "= np.where(np.any(char_pixel, axis=1))[0] x_min, x_max = horizontal_indicies[[0, -1]] y_min, y_max = vertical_indicies[[0, -1]]", "y_max = vertical_indicies[[0, -1]] except IndexError: continue x = x_min y = y_min", "filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font ) from grpc_utils import ( KuzuSegment, KuzuClassify ) if", "-1]] except IndexError: continue x = x_min y = y_min w = x_max", "/ 512 pil_img = Image.fromarray(origin_image).convert('RGBA') char_canvas = Image.new('RGBA', pil_img.size) char_draw = ImageDraw.Draw(char_canvas) print(\">>>", "255, 255), font=font ) except Exception as e: print(e) continue char_img = Image.alpha_composite(pil_img,", "import ( make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font ) from grpc_utils", "= y - y_diff y_to = y + h + y_diff x_from =", "pred_label = kuzu_cls.predict(char_img) # print(pred_label) char_draw.text( (x + w + FONT_SIZE / 4,", "all polygon area in image polygon_contours = make_contours(pred_bbox) # get all center points", "x = int(x * x_ratio) w = int(w * x_ratio) y = int(y", "# print(pred_label) char_draw.text( (x + w + FONT_SIZE / 4, y + h", "to original coordinates x = int(x * x_ratio) w = int(w * x_ratio)", "# convert to original coordinates x = int(x * 
x_ratio) w = int(w", "# ignore background hex color (=0) for cluster_index in tqdm(range(len(center_coords))[1:]): char_pixel = (bbox_cluster", "[y_from, y_to, x_from, x_to])) try: char_img = origin_image[y_from:y_to, x_from:x_to] char_img = kuzu_cls.load_image(char_img) pred_label", "char_img = Image.alpha_composite(pil_img, char_canvas) char_img = char_img.convert(\"RGB\") char_img = np.asarray(char_img) final_bbox = cv2.resize(final_bbox,", "(origin_w, origin_h)) final_center = cv2.resize(final_center, (origin_w, origin_h)) plt.imshow(char_img) plt.imshow(final_bbox, cmap=\"jet\", alpha=0.50) plt.savefig(\"./assets/{}.jpg\".format(time.time()), bbox_inches='tight')", "if __name__ == '__main__': img_dir = \"./images\" img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon", "polygon area in image polygon_contours = make_contours(pred_bbox) # get all center points by", "if filter_polygon: filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox", "tqdm import cv2 import numpy as np import matplotlib.pyplot as plt from PIL", "ignore background hex color (=0) for cluster_index in tqdm(range(len(center_coords))[1:]): char_pixel = (bbox_cluster ==", "import functools import random from tqdm import tqdm import cv2 import numpy as", "x + w + x_diff # tune y_from, y_to, x_from, x_to = \\", "from PIL import Image, ImageDraw from pylab import rcParams rcParams['figure.figsize'] = 20, 20", "# filter polygon if filter_polygon: filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa pred_bbox =", "origin_w = kuzu_seg.load_image(img_fp) pred_bbox, pred_center = kuzu_seg.predict(img) # get all polygon area in", "x_from:x_to] char_img = kuzu_cls.load_image(char_img) pred_label = kuzu_cls.predict(char_img) # print(pred_label) char_draw.text( (x + w", "x_diff # tune y_from, y_to, x_from, 
x_to = \\ list(map(functools.partial(np.maximum, 0), [y_from, y_to,", "origin_h / 512 x_ratio = origin_w / 512 pil_img = Image.fromarray(origin_image).convert('RGBA') char_canvas =", "y_max - y_min # convert to original coordinates x = int(x * x_ratio)", "/ 100) x_diff = math.ceil(w * offset / 100) # expand area y_from", "numpy as np import matplotlib.pyplot as plt from PIL import Image, ImageDraw from", "tqdm(range(len(center_coords))[1:]): char_pixel = (bbox_cluster == cluster_index).astype(np.float32) try: horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0] vertical_indicies =", "= x - x_diff x_to = x + w + x_diff # tune", "vis_pred_bbox_polygon, vis_pred_center, font ) from grpc_utils import ( KuzuSegment, KuzuClassify ) if __name__", "bbox_cluster = get_labels(center_coords, pred_bbox) # ignore background hex color (=0) for cluster_index in", "vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2) y_ratio = origin_h / 512 x_ratio", "from utils import ( make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font )", "'__main__': img_dir = \"./images\" img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon = True kuzu_seg", "= math.ceil(h * offset / 100) x_diff = math.ceil(w * offset / 100)", "get all polygon area in image polygon_contours = make_contours(pred_bbox) # get all center", "get_labels(center_coords, pred_bbox) # ignore background hex color (=0) for cluster_index in tqdm(range(len(center_coords))[1:]): char_pixel", "points by contour method center_coords = get_centers(pred_center.astype(np.uint8)) no_center_points = len(center_coords) final_center = vis_pred_center(center_coords,", "len(center_coords) final_center = vis_pred_center(center_coords, rad=2) # filter polygon if filter_polygon: filtered_contours = filter_polygons_points_intersection(polygon_contours,", 
"pylab import rcParams rcParams['figure.figsize'] = 20, 20 # noqa from consts import FONT_SIZE", "pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2) y_ratio = origin_h /", "np.asarray(char_img) final_bbox = cv2.resize(final_bbox, (origin_w, origin_h)) final_center = cv2.resize(final_center, (origin_w, origin_h)) plt.imshow(char_img) plt.imshow(final_bbox,", "__name__ == '__main__': img_dir = \"./images\" img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon =", "char_img = np.asarray(char_img) final_bbox = cv2.resize(final_bbox, (origin_w, origin_h)) final_center = cv2.resize(final_center, (origin_w, origin_h))", "# noqa from consts import FONT_SIZE from utils import ( make_contours, get_centers, get_labels,", "int(h * y_ratio) # set offset to crop character offset = 5 #", "width=2) y_ratio = origin_h / 512 x_ratio = origin_w / 512 pil_img =", "get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font ) from grpc_utils import ( KuzuSegment,", "h + y_diff x_from = x - x_diff x_to = x + w", "x_to])) try: char_img = origin_image[y_from:y_to, x_from:x_to] char_img = kuzu_cls.load_image(char_img) pred_label = kuzu_cls.predict(char_img) #", "# expand area y_from = y - y_diff y_to = y + h", "character offset = 5 # percentage y_diff = math.ceil(h * offset / 100)", "import rcParams rcParams['figure.figsize'] = 20, 20 # noqa from consts import FONT_SIZE from", "pred_bbox, pred_center = kuzu_seg.predict(img) # get all polygon area in image polygon_contours =", "get_labels, vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font ) from grpc_utils import ( KuzuSegment, KuzuClassify", "(x + w + FONT_SIZE / 4, y + h / 2 -", "kuzu_cls = KuzuClassify() img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp) pred_bbox, pred_center = 
kuzu_seg.predict(img)", "* offset / 100) x_diff = math.ceil(w * offset / 100) # expand", "y = int(y * y_ratio) h = int(h * y_ratio) # set offset", "= \"./images\" img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon = True kuzu_seg = KuzuSegment()", "x - x_diff x_to = x + w + x_diff # tune y_from,", "area y_from = y - y_diff y_to = y + h + y_diff", "= np.where(np.any(char_pixel, axis=0))[0] vertical_indicies = np.where(np.any(char_pixel, axis=1))[0] x_min, x_max = horizontal_indicies[[0, -1]] y_min,", "== cluster_index).astype(np.float32) try: horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0] vertical_indicies = np.where(np.any(char_pixel, axis=1))[0] x_min, x_max", "x_min, x_max = horizontal_indicies[[0, -1]] y_min, y_max = vertical_indicies[[0, -1]] except IndexError: continue", "expand area y_from = y - y_diff y_to = y + h +", "plt from PIL import Image, ImageDraw from pylab import rcParams rcParams['figure.figsize'] = 20,", "vis_pred_center, font ) from grpc_utils import ( KuzuSegment, KuzuClassify ) if __name__ ==", "IndexError: continue x = x_min y = y_min w = x_max - x_min", "img_dir = \"./images\" img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon = True kuzu_seg =", "import matplotlib.pyplot as plt from PIL import Image, ImageDraw from pylab import rcParams", "import tqdm import cv2 import numpy as np import matplotlib.pyplot as plt from", "kuzu_seg.predict(img) # get all polygon area in image polygon_contours = make_contours(pred_bbox) # get", "y_to, x_from, x_to])) try: char_img = origin_image[y_from:y_to, x_from:x_to] char_img = kuzu_cls.load_image(char_img) pred_label =", "noqa from consts import FONT_SIZE from utils import ( make_contours, get_centers, get_labels, vis_pred_bbox,", "= Image.alpha_composite(pil_img, char_canvas) char_img = char_img.convert(\"RGB\") char_img = np.asarray(char_img) final_bbox = cv2.resize(final_bbox, (origin_w,", "center 
points by contour method center_coords = get_centers(pred_center.astype(np.uint8)) no_center_points = len(center_coords) final_center =", "final_bbox = cv2.resize(final_bbox, (origin_w, origin_h)) final_center = cv2.resize(final_center, (origin_w, origin_h)) plt.imshow(char_img) plt.imshow(final_bbox, cmap=\"jet\",", "x_ratio = origin_w / 512 pil_img = Image.fromarray(origin_image).convert('RGBA') char_canvas = Image.new('RGBA', pil_img.size) char_draw", "polygon if filter_polygon: filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours)", "x = x_min y = y_min w = x_max - x_min h =", "FONT_SIZE), pred_label, fill=(0, 0, 255, 255), font=font ) except Exception as e: print(e)", "import time import functools import random from tqdm import tqdm import cv2 import", "contour method center_coords = get_centers(pred_center.astype(np.uint8)) no_center_points = len(center_coords) final_center = vis_pred_center(center_coords, rad=2) #", "x_max = horizontal_indicies[[0, -1]] y_min, y_max = vertical_indicies[[0, -1]] except IndexError: continue x", "= origin_h / 512 x_ratio = origin_w / 512 pil_img = Image.fromarray(origin_image).convert('RGBA') char_canvas", "- x_diff x_to = x + w + x_diff # tune y_from, y_to,", "= Image.new('RGBA', pil_img.size) char_draw = ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points)) if no_center_points > 0: bbox_cluster", "np import matplotlib.pyplot as plt from PIL import Image, ImageDraw from pylab import", "KuzuSegment() kuzu_cls = KuzuClassify() img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp) pred_bbox, pred_center =", "= y_max - y_min # convert to original coordinates x = int(x *", "center_coords, width=2) y_ratio = origin_h / 512 x_ratio = origin_w / 512 pil_img", "center_coords = get_centers(pred_center.astype(np.uint8)) no_center_points = len(center_coords) final_center = vis_pred_center(center_coords, 
rad=2) # filter polygon", "char_canvas = Image.new('RGBA', pil_img.size) char_draw = ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points)) if no_center_points > 0:", "print(\">>> {}\".format(no_center_points)) if no_center_points > 0: bbox_cluster = get_labels(center_coords, pred_bbox) # ignore background", "origin_h, origin_w = kuzu_seg.load_image(img_fp) pred_bbox, pred_center = kuzu_seg.predict(img) # get all polygon area", "fill=(0, 0, 255, 255), font=font ) except Exception as e: print(e) continue char_img", "import ( KuzuSegment, KuzuClassify ) if __name__ == '__main__': img_dir = \"./images\" img_fp", "# get all polygon area in image polygon_contours = make_contours(pred_bbox) # get all", "filter_polygons_points_intersection(polygon_contours, center_coords) # noqa pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2)", "x_min h = y_max - y_min # convert to original coordinates x =", "KuzuSegment, KuzuClassify ) if __name__ == '__main__': img_dir = \"./images\" img_fp = os.path.join(img_dir,", "coordinates x = int(x * x_ratio) w = int(w * x_ratio) y =", "img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp) pred_bbox, pred_center = kuzu_seg.predict(img) # get all", "x_ratio) w = int(w * x_ratio) y = int(y * y_ratio) h =", "x_diff x_to = x + w + x_diff # tune y_from, y_to, x_from,", "# get all center points by contour method center_coords = get_centers(pred_center.astype(np.uint8)) no_center_points =", ") if __name__ == '__main__': img_dir = \"./images\" img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp)", "20 # noqa from consts import FONT_SIZE from utils import ( make_contours, get_centers,", "= int(y * y_ratio) h = int(h * y_ratio) # set offset to", "/ 512 x_ratio = origin_w / 512 pil_img = Image.fromarray(origin_image).convert('RGBA') char_canvas = Image.new('RGBA',", "h / 2 - FONT_SIZE), pred_label, fill=(0, 0, 255, 255), 
font=font ) except", "cluster_index in tqdm(range(len(center_coords))[1:]): char_pixel = (bbox_cluster == cluster_index).astype(np.float32) try: horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0]", "img_fp = os.path.join(img_dir, random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon = True kuzu_seg = KuzuSegment() kuzu_cls =", "= len(center_coords) final_center = vis_pred_center(center_coords, rad=2) # filter polygon if filter_polygon: filtered_contours =", "ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points)) if no_center_points > 0: bbox_cluster = get_labels(center_coords, pred_bbox) # ignore", "y + h / 2 - FONT_SIZE), pred_label, fill=(0, 0, 255, 255), font=font", "int(x * x_ratio) w = int(w * x_ratio) y = int(y * y_ratio)", "= cv2.resize(final_bbox, (origin_w, origin_h)) final_center = cv2.resize(final_center, (origin_w, origin_h)) plt.imshow(char_img) plt.imshow(final_bbox, cmap=\"jet\", alpha=0.50)", "import cv2 import numpy as np import matplotlib.pyplot as plt from PIL import", "print(pred_label) char_draw.text( (x + w + FONT_SIZE / 4, y + h /", "x_max - x_min h = y_max - y_min # convert to original coordinates", "x_from, x_to])) try: char_img = origin_image[y_from:y_to, x_from:x_to] char_img = kuzu_cls.load_image(char_img) pred_label = kuzu_cls.predict(char_img)", "area in image polygon_contours = make_contours(pred_bbox) # get all center points by contour", "2 - FONT_SIZE), pred_label, fill=(0, 0, 255, 255), font=font ) except Exception as", "(=0) for cluster_index in tqdm(range(len(center_coords))[1:]): char_pixel = (bbox_cluster == cluster_index).astype(np.float32) try: horizontal_indicies =", "Image, ImageDraw from pylab import rcParams rcParams['figure.figsize'] = 20, 20 # noqa from", "random.choice(os.listdir(img_dir))) print(img_fp) filter_polygon = True kuzu_seg = KuzuSegment() kuzu_cls = KuzuClassify() img, origin_image,", "all center points by contour method center_coords = 
get_centers(pred_center.astype(np.uint8)) no_center_points = len(center_coords) final_center", "= Image.fromarray(origin_image).convert('RGBA') char_canvas = Image.new('RGBA', pil_img.size) char_draw = ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points)) if no_center_points", "/ 2 - FONT_SIZE), pred_label, fill=(0, 0, 255, 255), font=font ) except Exception", "char_canvas) char_img = char_img.convert(\"RGB\") char_img = np.asarray(char_img) final_bbox = cv2.resize(final_bbox, (origin_w, origin_h)) final_center", "np.where(np.any(char_pixel, axis=1))[0] x_min, x_max = horizontal_indicies[[0, -1]] y_min, y_max = vertical_indicies[[0, -1]] except", "from consts import FONT_SIZE from utils import ( make_contours, get_centers, get_labels, vis_pred_bbox, filter_polygons_points_intersection,", "w + x_diff # tune y_from, y_to, x_from, x_to = \\ list(map(functools.partial(np.maximum, 0),", "+ w + x_diff # tune y_from, y_to, x_from, x_to = \\ list(map(functools.partial(np.maximum,", "vis_pred_bbox(pred_bbox, center_coords, width=2) y_ratio = origin_h / 512 x_ratio = origin_w / 512", "continue x = x_min y = y_min w = x_max - x_min h", "in tqdm(range(len(center_coords))[1:]): char_pixel = (bbox_cluster == cluster_index).astype(np.float32) try: horizontal_indicies = np.where(np.any(char_pixel, axis=0))[0] vertical_indicies", "Image.fromarray(origin_image).convert('RGBA') char_canvas = Image.new('RGBA', pil_img.size) char_draw = ImageDraw.Draw(char_canvas) print(\">>> {}\".format(no_center_points)) if no_center_points >", "filtered_contours = filter_polygons_points_intersection(polygon_contours, center_coords) # noqa pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox = vis_pred_bbox(pred_bbox,", "y_to = y + h + y_diff x_from = x - x_diff x_to", "crop character offset = 5 # percentage y_diff = math.ceil(h * offset /", "y - y_diff y_to = y + h + y_diff x_from = x", "horizontal_indicies[[0, -1]] y_min, y_max = vertical_indicies[[0, -1]] 
except IndexError: continue x = x_min", "font ) from grpc_utils import ( KuzuSegment, KuzuClassify ) if __name__ == '__main__':", "+ FONT_SIZE / 4, y + h / 2 - FONT_SIZE), pred_label, fill=(0,", "try: char_img = origin_image[y_from:y_to, x_from:x_to] char_img = kuzu_cls.load_image(char_img) pred_label = kuzu_cls.predict(char_img) # print(pred_label)", "font=font ) except Exception as e: print(e) continue char_img = Image.alpha_composite(pil_img, char_canvas) char_img", "background hex color (=0) for cluster_index in tqdm(range(len(center_coords))[1:]): char_pixel = (bbox_cluster == cluster_index).astype(np.float32)", "filtered_contours) final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2) y_ratio = origin_h / 512 x_ratio =", "( KuzuSegment, KuzuClassify ) if __name__ == '__main__': img_dir = \"./images\" img_fp =", "# noqa pred_bbox = vis_pred_bbox_polygon(pred_bbox, filtered_contours) final_bbox = vis_pred_bbox(pred_bbox, center_coords, width=2) y_ratio =", "vis_pred_bbox, filter_polygons_points_intersection, vis_pred_bbox_polygon, vis_pred_center, font ) from grpc_utils import ( KuzuSegment, KuzuClassify )", "= KuzuSegment() kuzu_cls = KuzuClassify() img, origin_image, origin_h, origin_w = kuzu_seg.load_image(img_fp) pred_bbox, pred_center", "= x_max - x_min h = y_max - y_min # convert to original", "from grpc_utils import ( KuzuSegment, KuzuClassify ) if __name__ == '__main__': img_dir =", "100) # expand area y_from = y - y_diff y_to = y +" ]
[ "frame: FrameType) -> None: self.signal_received = (sig, frame) def __exit__(self, exc_type: Type, exc_val:", "self.signal_received: self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal: def __enter__(self) -> None: # Prevent signal from propagating", "None def __enter__(self) -> None: # When we're in a thread we can't", "__enter__(self) -> None: # When we're in a thread we can't use signal", "a thread or not \"\"\" self.in_thread = in_thread self.signal_received = None def __enter__(self)", "Type, exc_val: Exception, exc_tb: Traceback) -> None: # Restore signal signal_(SIGINT, self._handler) def", "Signals from types import FrameType from typing import Type class DelayedKeyboardInterrupt: def __init__(self,", "None: # Prevent signal from propagating to child process self._handler = getsignal(SIGINT) ignore_keyboard_interrupt()", "handling if not self.in_thread: self.signal_received = False self.old_handler = signal_(SIGINT, self.handler) def handler(self,", "can't use signal handling if not self.in_thread: self.signal_received = False self.old_handler = signal_(SIGINT,", "not self.in_thread: signal_(SIGINT, self.old_handler) if self.signal_received: self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal: def __enter__(self) -> None:", "in_thread: bool = False) -> None: \"\"\" :param in_thread: Whether or not we're", "= signal_(SIGINT, self.handler) def handler(self, sig: Signals, frame: FrameType) -> None: self.signal_received =", "self.in_thread: signal_(SIGINT, self.old_handler) if self.signal_received: self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal: def __enter__(self) -> None: #", "from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals from types import", "-> None: # When we're in a thread we can't use signal handling", "handler(self, sig: Signals, frame: FrameType) -> None: self.signal_received = (sig, frame) def __exit__(self,", "from typing import Type class 
DelayedKeyboardInterrupt: def __init__(self, in_thread: bool = False) ->", "getsignal(SIGINT) ignore_keyboard_interrupt() def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None: #", "self.signal_received = None def __enter__(self) -> None: # When we're in a thread", "signal_, Signals from types import FrameType from typing import Type class DelayedKeyboardInterrupt: def", "exc_tb: Traceback) -> None: # Restore signal signal_(SIGINT, self._handler) def ignore_keyboard_interrupt(): signal_(SIGINT, SIG_IGN)", "False self.old_handler = signal_(SIGINT, self.handler) def handler(self, sig: Signals, frame: FrameType) -> None:", "not we're living in a thread or not \"\"\" self.in_thread = in_thread self.signal_received", "= in_thread self.signal_received = None def __enter__(self) -> None: # When we're in", "= False self.old_handler = signal_(SIGINT, self.handler) def handler(self, sig: Signals, frame: FrameType) ->", "self.in_thread: self.signal_received = False self.old_handler = signal_(SIGINT, self.handler) def handler(self, sig: Signals, frame:", "inspect import Traceback from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals", "self.handler) def handler(self, sig: Signals, frame: FrameType) -> None: self.signal_received = (sig, frame)", "class DelayedKeyboardInterrupt: def __init__(self, in_thread: bool = False) -> None: \"\"\" :param in_thread:", "def __init__(self, in_thread: bool = False) -> None: \"\"\" :param in_thread: Whether or", "use signal handling if not self.in_thread: self.signal_received = False self.old_handler = signal_(SIGINT, self.handler)", "types import FrameType from typing import Type class DelayedKeyboardInterrupt: def __init__(self, in_thread: bool", "exc_tb: Traceback) -> None: if not self.in_thread: signal_(SIGINT, self.old_handler) if self.signal_received: self.old_handler(*self.signal_received) class", "Type, exc_val: Exception, exc_tb: Traceback) -> None: if not self.in_thread: signal_(SIGINT, 
self.old_handler) if", "exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None: # Restore signal signal_(SIGINT, self._handler)", "import FrameType from typing import Type class DelayedKeyboardInterrupt: def __init__(self, in_thread: bool =", "Whether or not we're living in a thread or not \"\"\" self.in_thread =", "propagating to child process self._handler = getsignal(SIGINT) ignore_keyboard_interrupt() def __exit__(self, exc_type: Type, exc_val:", "in_thread self.signal_received = None def __enter__(self) -> None: # When we're in a", "self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal: def __enter__(self) -> None: # Prevent signal from propagating to", "in a thread we can't use signal handling if not self.in_thread: self.signal_received =", "in a thread or not \"\"\" self.in_thread = in_thread self.signal_received = None def", "ignore_keyboard_interrupt() def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None: # Restore", "in_thread: Whether or not we're living in a thread or not \"\"\" self.in_thread", "import Type class DelayedKeyboardInterrupt: def __init__(self, in_thread: bool = False) -> None: \"\"\"", "DelayedKeyboardInterrupt: def __init__(self, in_thread: bool = False) -> None: \"\"\" :param in_thread: Whether", "def __enter__(self) -> None: # Prevent signal from propagating to child process self._handler", "def handler(self, sig: Signals, frame: FrameType) -> None: self.signal_received = (sig, frame) def", "signal_(SIGINT, self.handler) def handler(self, sig: Signals, frame: FrameType) -> None: self.signal_received = (sig,", "FrameType from typing import Type class DelayedKeyboardInterrupt: def __init__(self, in_thread: bool = False)", "SIG_IGN, SIGINT, signal as signal_, Signals from types import FrameType from typing import", "\"\"\" :param in_thread: Whether or not we're living in a thread or not", "def __enter__(self) -> None: # When we're in a thread we can't use", "from propagating to 
child process self._handler = getsignal(SIGINT) ignore_keyboard_interrupt() def __exit__(self, exc_type: Type,", "Exception, exc_tb: Traceback) -> None: # Restore signal signal_(SIGINT, self._handler) def ignore_keyboard_interrupt(): signal_(SIGINT,", "-> None: if not self.in_thread: signal_(SIGINT, self.old_handler) if self.signal_received: self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal: def", "as signal_, Signals from types import FrameType from typing import Type class DelayedKeyboardInterrupt:", "self.old_handler = signal_(SIGINT, self.handler) def handler(self, sig: Signals, frame: FrameType) -> None: self.signal_received", "Prevent signal from propagating to child process self._handler = getsignal(SIGINT) ignore_keyboard_interrupt() def __exit__(self,", "a thread we can't use signal handling if not self.in_thread: self.signal_received = False", "__init__(self, in_thread: bool = False) -> None: \"\"\" :param in_thread: Whether or not", "if not self.in_thread: signal_(SIGINT, self.old_handler) if self.signal_received: self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal: def __enter__(self) ->", "None: \"\"\" :param in_thread: Whether or not we're living in a thread or", "import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals from types import FrameType from", "we can't use signal handling if not self.in_thread: self.signal_received = False self.old_handler =", "getsignal, SIG_IGN, SIGINT, signal as signal_, Signals from types import FrameType from typing", "Type class DelayedKeyboardInterrupt: def __init__(self, in_thread: bool = False) -> None: \"\"\" :param", "\"\"\" self.in_thread = in_thread self.signal_received = None def __enter__(self) -> None: # When", "Signals, frame: FrameType) -> None: self.signal_received = (sig, frame) def __exit__(self, exc_type: Type,", "-> None: \"\"\" :param in_thread: Whether or not we're living in a thread", "Exception, exc_tb: Traceback) -> None: if not 
self.in_thread: signal_(SIGINT, self.old_handler) if self.signal_received: self.old_handler(*self.signal_received)", ":param in_thread: Whether or not we're living in a thread or not \"\"\"", "-> None: # Prevent signal from propagating to child process self._handler = getsignal(SIGINT)", "self.old_handler) if self.signal_received: self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal: def __enter__(self) -> None: # Prevent signal", "def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None: if not self.in_thread:", "= False) -> None: \"\"\" :param in_thread: Whether or not we're living in", "exc_val: Exception, exc_tb: Traceback) -> None: if not self.in_thread: signal_(SIGINT, self.old_handler) if self.signal_received:", "thread we can't use signal handling if not self.in_thread: self.signal_received = False self.old_handler", "bool = False) -> None: \"\"\" :param in_thread: Whether or not we're living", "self.signal_received = (sig, frame) def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) ->", "to child process self._handler = getsignal(SIGINT) ignore_keyboard_interrupt() def __exit__(self, exc_type: Type, exc_val: Exception,", "= getsignal(SIGINT) ignore_keyboard_interrupt() def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None:", "not self.in_thread: self.signal_received = False self.old_handler = signal_(SIGINT, self.handler) def handler(self, sig: Signals,", "exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None: if not self.in_thread: signal_(SIGINT, self.old_handler)", "signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals from types import FrameType", "self.signal_received = False self.old_handler = signal_(SIGINT, self.handler) def handler(self, sig: Signals, frame: FrameType)", "= (sig, frame) def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None:", "FrameType) -> None: self.signal_received = (sig, frame) 
def __exit__(self, exc_type: Type, exc_val: Exception,", "if not self.in_thread: self.signal_received = False self.old_handler = signal_(SIGINT, self.handler) def handler(self, sig:", "frame) def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None: if not", "SIGINT, signal as signal_, Signals from types import FrameType from typing import Type", "import Traceback from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals from", "signal_(SIGINT, self.old_handler) if self.signal_received: self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal: def __enter__(self) -> None: # Prevent", "False) -> None: \"\"\" :param in_thread: Whether or not we're living in a", "self.in_thread = in_thread self.signal_received = None def __enter__(self) -> None: # When we're", "living in a thread or not \"\"\" self.in_thread = in_thread self.signal_received = None", "process self._handler = getsignal(SIGINT) ignore_keyboard_interrupt() def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback)", "from types import FrameType from typing import Type class DelayedKeyboardInterrupt: def __init__(self, in_thread:", "we're in a thread we can't use signal handling if not self.in_thread: self.signal_received", "(sig, frame) def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None: if", "exc_val: Exception, exc_tb: Traceback) -> None: # Restore signal signal_(SIGINT, self._handler) def ignore_keyboard_interrupt():", "typing import Type class DelayedKeyboardInterrupt: def __init__(self, in_thread: bool = False) -> None:", "When we're in a thread we can't use signal handling if not self.in_thread:", "__exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None: # Restore signal signal_(SIGINT,", "signal handling if not self.in_thread: self.signal_received = False self.old_handler = signal_(SIGINT, self.handler) def", "if self.signal_received: 
self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal: def __enter__(self) -> None: # Prevent signal from", "thread or not \"\"\" self.in_thread = in_thread self.signal_received = None def __enter__(self) ->", "Traceback from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals from types", "None: self.signal_received = (sig, frame) def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback)", "self._handler = getsignal(SIGINT) ignore_keyboard_interrupt() def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) ->", "DisableKeyboardInterruptSignal: def __enter__(self) -> None: # Prevent signal from propagating to child process", "or not we're living in a thread or not \"\"\" self.in_thread = in_thread", "__exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None: if not self.in_thread: signal_(SIGINT,", "-> None: self.signal_received = (sig, frame) def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb:", "None: if not self.in_thread: signal_(SIGINT, self.old_handler) if self.signal_received: self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal: def __enter__(self)", "not \"\"\" self.in_thread = in_thread self.signal_received = None def __enter__(self) -> None: #", "sig: Signals, frame: FrameType) -> None: self.signal_received = (sig, frame) def __exit__(self, exc_type:", "signal from propagating to child process self._handler = getsignal(SIGINT) ignore_keyboard_interrupt() def __exit__(self, exc_type:", "class DisableKeyboardInterruptSignal: def __enter__(self) -> None: # Prevent signal from propagating to child", "= None def __enter__(self) -> None: # When we're in a thread we", "or not \"\"\" self.in_thread = in_thread self.signal_received = None def __enter__(self) -> None:", "def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None: # Restore signal", "__enter__(self) -> None: # Prevent signal from propagating to 
child process self._handler =", "from inspect import Traceback from signal import getsignal, SIG_IGN, SIGINT, signal as signal_,", "child process self._handler = getsignal(SIGINT) ignore_keyboard_interrupt() def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb:", "# When we're in a thread we can't use signal handling if not", "Traceback) -> None: if not self.in_thread: signal_(SIGINT, self.old_handler) if self.signal_received: self.old_handler(*self.signal_received) class DisableKeyboardInterruptSignal:", "signal as signal_, Signals from types import FrameType from typing import Type class", "we're living in a thread or not \"\"\" self.in_thread = in_thread self.signal_received =", "# Prevent signal from propagating to child process self._handler = getsignal(SIGINT) ignore_keyboard_interrupt() def", "None: # When we're in a thread we can't use signal handling if" ]
[ "win32com.propsys import propsys from win32com.shell import shell except ImportError: raise ImportError( \"pywin32 is", "namespace, values: Any, option_string=None): if values.suffix != \".ico\": raise ValueError(\"The supplied icon file", "appName: str, iconPath: Optional[Path] = None, overwrite: bool = False, appDataPath: str =", "ID for identification\") parser.add_argument(\"--name\", \"-n\", type=str, required=True, help=\"Display name on notification\") parser.add_argument( \"--icon\",", "# pragma: no cover parser = argparse.ArgumentParser(description=\"Create shell link for use in toast", "help=\"Overwrite if a link already exists\" ) args = parser.parse_args() create_shell_link( appId=args.app_id, appName=args.name,", "/ f\"{appName}.lnk\" linkExists = shellLinkPath.exists() if linkExists: # pragma: no cover if overwrite:", ".ico.\") setattr(namespace, self.dest, values) # noinspection PyUnresolvedReferences def create_shell_link( appId: str, appName: str,", ") # Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection PyTypeChecker shellLink = pythoncom.CoCreateInstance( shell.CLSID_ShellLink, None,", "file # noinspection PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully {'modified' if linkExists else 'created'} shell", "Optional[Path] = None, overwrite: bool = False, appDataPath: str = os.getenv(\"APPDATA\"), ): #", "cover raise RuntimeError(\"Couldn't find APPDATA path. Please rerun this script with the --appdata", "shell.IID_IShellLink ) # Set shell link arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") if iconPath is", "raise RuntimeError(\"Couldn't find APPDATA path. 
Please rerun this script with the --appdata argument\")", "parser = argparse.ArgumentParser(description=\"Create shell link for use in toast notifications\") parser.add_argument(\"--appdata\", \"-ad\", type=str,", "no cover parser = argparse.ArgumentParser(description=\"Create shell link for use in toast notifications\") parser.add_argument(\"--appdata\",", "notification\") parser.add_argument( \"--icon\", \"-i\", type=Path, required=False, action=IconFileAction, help=\"Path to image file for desired", "!= \".ico\": raise ValueError(\"The supplied icon file is not of type .ico.\") setattr(namespace,", "0) # Set AUMI to supplied argument propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\")", "use in toast notifications\") parser.add_argument(\"--appdata\", \"-ad\", type=str, required=False, help=\"AppData path if script fails", "Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection PyTypeChecker shellLink = pythoncom.CoCreateInstance( shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink", "link for use in toast notifications\") parser.add_argument(\"--appdata\", \"-ad\", type=str, required=False, help=\"AppData path if", ") class IconFileAction(argparse.Action): # pragma: no cover def __call__(self, parser_container, namespace, values: Any,", ") else: parser.add_argument( \"--overwrite\", \"-o\", default=False, action=\"store_true\", help=\"Overwrite if a link already exists\"", "None: shellLink.SetIconLocation(str(iconPath.resolve()), 0) # Set AUMI to supplied argument propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey", "\"-i\", type=Path, required=False, action=IconFileAction, help=\"Path to image file for desired icon\" ) if", "a link already exists\" ) else: parser.add_argument( \"--overwrite\", \"-o\", default=False, action=\"store_true\", 
help=\"Overwrite if", "PyUnresolvedReferences def create_shell_link( appId: str, appName: str, iconPath: Optional[Path] = None, overwrite: bool", "link already exists\" ) else: parser.add_argument( \"--overwrite\", \"-o\", default=False, action=\"store_true\", help=\"Overwrite if a", "= shellLinkPath.exists() if linkExists: # pragma: no cover if overwrite: print(\"Script run with", "= argparse.ArgumentParser(description=\"Create shell link for use in toast notifications\") parser.add_argument(\"--appdata\", \"-ad\", type=str, required=False,", "type=str, required=False, help=\"AppData path if script fails to find it\") parser.add_argument(\"--app_id\", \"-a\", type=str,", "on notification\") parser.add_argument( \"--icon\", \"-i\", type=Path, required=False, action=IconFileAction, help=\"Path to image file for", "print(f\"Successfully {'modified' if linkExists else 'created'} shell link with the AUMI '{appId}'\") if", "in a terminal\" ) class IconFileAction(argparse.Action): # pragma: no cover def __call__(self, parser_container,", "help=\"Path to image file for desired icon\" ) if sys.version_info >= (3, 9):", "argparse.ArgumentParser(description=\"Create shell link for use in toast notifications\") parser.add_argument(\"--appdata\", \"-ad\", type=str, required=False, help=\"AppData", "link with the AUMI '{appId}'\") if __name__ == \"__main__\": # pragma: no cover", "link already exists\" ) args = parser.parse_args() create_shell_link( appId=args.app_id, appName=args.name, iconPath=args.icon, overwrite=args.overwrite, appDataPath=args.appdata", "pywin32' in a terminal\" ) class IconFileAction(argparse.Action): # pragma: no cover def __call__(self,", "toast notifications\") parser.add_argument(\"--appdata\", \"-ad\", type=str, required=False, help=\"AppData path if script fails to find", "https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection PyTypeChecker shellLink = pythoncom.CoCreateInstance( 
shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink ) #", "propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit() # Save file # noinspection PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully {'modified'", "shell link with the AUMI '{appId}'\") if __name__ == \"__main__\": # pragma: no", "Path from typing import Any, Optional try: import pythoncom from win32com.propsys import propsys", "option_string=None): if values.suffix != \".ico\": raise ValueError(\"The supplied icon file is not of", "for identification\") parser.add_argument(\"--name\", \"-n\", type=str, required=True, help=\"Display name on notification\") parser.add_argument( \"--icon\", \"-i\",", "os import sys from pathlib import Path from typing import Any, Optional try:", "not None: shellLink.SetIconLocation(str(iconPath.resolve()), 0) # Set AUMI to supplied argument propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore)", "no cover if overwrite: print(\"Script run with --overwrite, overwriting existing link...\") else: sys.exit(", "PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully {'modified' if linkExists else 'created'} shell link with the", "shellLinkPath.exists() if linkExists: # pragma: no cover if overwrite: print(\"Script run with --overwrite,", "f\"{appName}.lnk\" linkExists = shellLinkPath.exists() if linkExists: # pragma: no cover if overwrite: print(\"Script", "run create_shell_link.py.To install, execute 'pip install pywin32' in a terminal\" ) class IconFileAction(argparse.Action):", "\"Programs\" shellLinkPath = programsPath / f\"{appName}.lnk\" linkExists = shellLinkPath.exists() if linkExists: # pragma:", "cover parser = argparse.ArgumentParser(description=\"Create shell link for use in toast notifications\") parser.add_argument(\"--appdata\", 
\"-ad\",", "# pragma: no cover raise RuntimeError(\"Couldn't find APPDATA path. Please rerun this script", "arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") if iconPath is not None: shellLink.SetIconLocation(str(iconPath.resolve()), 0) # Set", "== \"__main__\": # pragma: no cover parser = argparse.ArgumentParser(description=\"Create shell link for use", "is None: # pragma: no cover raise RuntimeError(\"Couldn't find APPDATA path. Please rerun", "Set shell link arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") if iconPath is not None: shellLink.SetIconLocation(str(iconPath.resolve()),", "def __call__(self, parser_container, namespace, values: Any, option_string=None): if values.suffix != \".ico\": raise ValueError(\"The", "default=False, action=\"store_true\", help=\"Overwrite if a link already exists\" ) args = parser.parse_args() create_shell_link(", "propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit() # Save file #", "is not None: shellLink.SetIconLocation(str(iconPath.resolve()), 0) # Set AUMI to supplied argument propertyStore =", "# pragma: no cover if overwrite: print(\"Script run with --overwrite, overwriting existing link...\")", "shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") if iconPath is not None: shellLink.SetIconLocation(str(iconPath.resolve()), 0) # Set AUMI to", "linkExists: # pragma: no cover if overwrite: print(\"Script run with --overwrite, overwriting existing", "not of type .ico.\") setattr(namespace, self.dest, values) # noinspection PyUnresolvedReferences def create_shell_link( appId:", "iconPath is not None: shellLink.SetIconLocation(str(iconPath.resolve()), 0) # Set AUMI to supplied argument propertyStore", "sys from pathlib import Path from 
typing import Any, Optional try: import pythoncom", "import shell except ImportError: raise ImportError( \"pywin32 is required to run create_shell_link.py.To install,", "propertyStore.Commit() # Save file # noinspection PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully {'modified' if linkExists", "required=False, help=\"AppData path if script fails to find it\") parser.add_argument(\"--app_id\", \"-a\", type=str, required=True,", "linkExists else 'created'} shell link with the AUMI '{appId}'\") if __name__ == \"__main__\":", "the --overwrite argument\" ) # Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection PyTypeChecker shellLink =", "def create_shell_link( appId: str, appName: str, iconPath: Optional[Path] = None, overwrite: bool =", "script fails to find it\") parser.add_argument(\"--app_id\", \"-a\", type=str, required=True, help=\"Application User Model ID", "import argparse import os import sys from pathlib import Path from typing import", "values) # noinspection PyUnresolvedReferences def create_shell_link( appId: str, appName: str, iconPath: Optional[Path] =", "script with the --appdata argument\") programsPath = Path(appDataPath) / \"Microsoft\" / \"Windows\" /", "# noinspection PyUnresolvedReferences def create_shell_link( appId: str, appName: str, iconPath: Optional[Path] = None,", "this script with the --appdata argument\") programsPath = Path(appDataPath) / \"Microsoft\" / \"Windows\"", "--overwrite argument\" ) # Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection PyTypeChecker shellLink = pythoncom.CoCreateInstance(", "already exists\" ) else: parser.add_argument( \"--overwrite\", \"-o\", default=False, action=\"store_true\", help=\"Overwrite if a link", "supplied icon file is not of type .ico.\") setattr(namespace, self.dest, values) # 
noinspection", "# Save file # noinspection PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully {'modified' if linkExists else", "\"pywin32 is required to run create_shell_link.py.To install, execute 'pip install pywin32' in a", "from win32com.propsys import propsys from win32com.shell import shell except ImportError: raise ImportError( \"pywin32", "To overwrite, rerun this script with the --overwrite argument\" ) # Adapted from", "pragma: no cover parser = argparse.ArgumentParser(description=\"Create shell link for use in toast notifications\")", "if a link already exists\" ) else: parser.add_argument( \"--overwrite\", \"-o\", default=False, action=\"store_true\", help=\"Overwrite", "required=False, action=IconFileAction, help=\"Path to image file for desired icon\" ) if sys.version_info >=", "help=\"Application User Model ID for identification\") parser.add_argument(\"--name\", \"-n\", type=str, required=True, help=\"Display name on", "\"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite if a link already exists\" ) else: parser.add_argument( \"--overwrite\", \"-o\",", "the --appdata argument\") programsPath = Path(appDataPath) / \"Microsoft\" / \"Windows\" / \"Start Menu\"", "= None, overwrite: bool = False, appDataPath: str = os.getenv(\"APPDATA\"), ): # See", "to image file for desired icon\" ) if sys.version_info >= (3, 9): parser.add_argument(", "for use in toast notifications\") parser.add_argument(\"--appdata\", \"-ad\", type=str, required=False, help=\"AppData path if script", "pragma: no cover def __call__(self, parser_container, namespace, values: Any, option_string=None): if values.suffix !=", "shell link arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") if iconPath is not None: shellLink.SetIconLocation(str(iconPath.resolve()), 0)", "'{shellLinkPath}' already exists. 
To overwrite, rerun this script with the --overwrite argument\" )", "AUMI '{appId}'\") if __name__ == \"__main__\": # pragma: no cover parser = argparse.ArgumentParser(description=\"Create", "overwrite, rerun this script with the --overwrite argument\" ) # Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py", "action=\"store_true\", help=\"Overwrite if a link already exists\" ) args = parser.parse_args() create_shell_link( appId=args.app_id,", "programsPath = Path(appDataPath) / \"Microsoft\" / \"Windows\" / \"Start Menu\" / \"Programs\" shellLinkPath", "with --overwrite, overwriting existing link...\") else: sys.exit( f\"Link '{shellLinkPath}' already exists. To overwrite,", "appDataPath: str = os.getenv(\"APPDATA\"), ): # See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath is None: #", "find it\") parser.add_argument(\"--app_id\", \"-a\", type=str, required=True, help=\"Application User Model ID for identification\") parser.add_argument(\"--name\",", "values: Any, option_string=None): if values.suffix != \".ico\": raise ValueError(\"The supplied icon file is", "\".ico\": raise ValueError(\"The supplied icon file is not of type .ico.\") setattr(namespace, self.dest,", "pythoncom from win32com.propsys import propsys from win32com.shell import shell except ImportError: raise ImportError(", "cover def __call__(self, parser_container, namespace, values: Any, option_string=None): if values.suffix != \".ico\": raise", "'{appId}'\") if __name__ == \"__main__\": # pragma: no cover parser = argparse.ArgumentParser(description=\"Create shell", "self.dest, values) # noinspection PyUnresolvedReferences def create_shell_link( appId: str, appName: str, iconPath: Optional[Path]", "raise ImportError( \"pywin32 is required to run create_shell_link.py.To install, execute 'pip install pywin32'", "fails to find it\") parser.add_argument(\"--app_id\", \"-a\", type=str, required=True, 
help=\"Application User Model ID for", "appId: str, appName: str, iconPath: Optional[Path] = None, overwrite: bool = False, appDataPath:", "__name__ == \"__main__\": # pragma: no cover parser = argparse.ArgumentParser(description=\"Create shell link for", "pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink ) # Set shell link arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") if iconPath", "See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath is None: # pragma: no cover raise RuntimeError(\"Couldn't find", "\"-ad\", type=str, required=False, help=\"AppData path if script fails to find it\") parser.add_argument(\"--app_id\", \"-a\",", "parser.add_argument( \"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite if a link already exists\" ) else: parser.add_argument(", "type .ico.\") setattr(namespace, self.dest, values) # noinspection PyUnresolvedReferences def create_shell_link( appId: str, appName:", "is not of type .ico.\") setattr(namespace, self.dest, values) # noinspection PyUnresolvedReferences def create_shell_link(", "identification\") parser.add_argument(\"--name\", \"-n\", type=str, required=True, help=\"Display name on notification\") parser.add_argument( \"--icon\", \"-i\", type=Path,", "typing import Any, Optional try: import pythoncom from win32com.propsys import propsys from win32com.shell", "path if script fails to find it\") parser.add_argument(\"--app_id\", \"-a\", type=str, required=True, help=\"Application User", "--appdata argument\") programsPath = Path(appDataPath) / \"Microsoft\" / \"Windows\" / \"Start Menu\" /", "of type .ico.\") setattr(namespace, self.dest, values) # noinspection PyUnresolvedReferences def create_shell_link( appId: str,", "values.suffix != \".ico\": raise ValueError(\"The supplied icon file is not of type .ico.\")", "to find it\") parser.add_argument(\"--app_id\", \"-a\", type=str, required=True, 
help=\"Application User Model ID for identification\")", "else: sys.exit( f\"Link '{shellLinkPath}' already exists. To overwrite, rerun this script with the", "= os.getenv(\"APPDATA\"), ): # See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath is None: # pragma: no", "raise ValueError(\"The supplied icon file is not of type .ico.\") setattr(namespace, self.dest, values)", "(3, 9): parser.add_argument( \"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite if a link already exists\" )", "shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink ) # Set shell link arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\")", "this script with the --overwrite argument\" ) # Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection", ">= (3, 9): parser.add_argument( \"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite if a link already exists\"", "rerun this script with the --appdata argument\") programsPath = Path(appDataPath) / \"Microsoft\" /", "from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection PyTypeChecker shellLink = pythoncom.CoCreateInstance( shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink )", "False, appDataPath: str = os.getenv(\"APPDATA\"), ): # See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath is None:", "rerun this script with the --overwrite argument\" ) # Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py #", "RuntimeError(\"Couldn't find APPDATA path. 
Please rerun this script with the --appdata argument\") programsPath", "shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit() # Save file # noinspection PyUnresolvedReferences", "action=argparse.BooleanOptionalAction, help=\"Overwrite if a link already exists\" ) else: parser.add_argument( \"--overwrite\", \"-o\", default=False,", "create_shell_link.py.To install, execute 'pip install pywin32' in a terminal\" ) class IconFileAction(argparse.Action): #", "AUMI to supplied argument propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit()", "with the --appdata argument\") programsPath = Path(appDataPath) / \"Microsoft\" / \"Windows\" / \"Start", "except ImportError: raise ImportError( \"pywin32 is required to run create_shell_link.py.To install, execute 'pip", "import sys from pathlib import Path from typing import Any, Optional try: import", "argument propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit() # Save file", "pathlib import Path from typing import Any, Optional try: import pythoncom from win32com.propsys", "terminal\" ) class IconFileAction(argparse.Action): # pragma: no cover def __call__(self, parser_container, namespace, values:", "= shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit() # Save file # noinspection", "help=\"AppData path if script fails to find it\") 
parser.add_argument(\"--app_id\", \"-a\", type=str, required=True, help=\"Application", "if a link already exists\" ) args = parser.parse_args() create_shell_link( appId=args.app_id, appName=args.name, iconPath=args.icon,", "\"Windows\" / \"Start Menu\" / \"Programs\" shellLinkPath = programsPath / f\"{appName}.lnk\" linkExists =", "parser_container, namespace, values: Any, option_string=None): if values.suffix != \".ico\": raise ValueError(\"The supplied icon", "/ \"Programs\" shellLinkPath = programsPath / f\"{appName}.lnk\" linkExists = shellLinkPath.exists() if linkExists: #", "script with the --overwrite argument\" ) # Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection PyTypeChecker", "# Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection PyTypeChecker shellLink = pythoncom.CoCreateInstance( shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER,", "the AUMI '{appId}'\") if __name__ == \"__main__\": # pragma: no cover parser =", "with the --overwrite argument\" ) # Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection PyTypeChecker shellLink", "required to run create_shell_link.py.To install, execute 'pip install pywin32' in a terminal\" )", "shellLink = pythoncom.CoCreateInstance( shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink ) # Set shell link arguments", "ImportError: raise ImportError( \"pywin32 is required to run create_shell_link.py.To install, execute 'pip install", "Menu\" / \"Programs\" shellLinkPath = programsPath / f\"{appName}.lnk\" linkExists = shellLinkPath.exists() if linkExists:", "shellLink.SetIconLocation(str(iconPath.resolve()), 0) # Set AUMI to supplied argument propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey =", "/ \"Start Menu\" / \"Programs\" shellLinkPath = programsPath / 
f\"{appName}.lnk\" linkExists = shellLinkPath.exists()", "install pywin32' in a terminal\" ) class IconFileAction(argparse.Action): # pragma: no cover def", "from win32com.shell import shell except ImportError: raise ImportError( \"pywin32 is required to run", "bool = False, appDataPath: str = os.getenv(\"APPDATA\"), ): # See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath", "sys.version_info >= (3, 9): parser.add_argument( \"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite if a link already", "import Any, Optional try: import pythoncom from win32com.propsys import propsys from win32com.shell import", "if values.suffix != \".ico\": raise ValueError(\"The supplied icon file is not of type", "propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit() # Save file # noinspection PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully", "if linkExists: # pragma: no cover if overwrite: print(\"Script run with --overwrite, overwriting", "= pythoncom.CoCreateInstance( shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink ) # Set shell link arguments shellLink.SetPath(\"\")", "# noinspection PyTypeChecker shellLink = pythoncom.CoCreateInstance( shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink ) # Set", "'created'} shell link with the AUMI '{appId}'\") if __name__ == \"__main__\": # pragma:", "help=\"Display name on notification\") parser.add_argument( \"--icon\", \"-i\", type=Path, required=False, action=IconFileAction, help=\"Path to image", "help=\"Overwrite if a link already exists\" ) else: parser.add_argument( \"--overwrite\", \"-o\", default=False, action=\"store_true\",", "Path(appDataPath) / \"Microsoft\" / \"Windows\" / \"Start Menu\" / \"Programs\" shellLinkPath = 
programsPath", "# pragma: no cover def __call__(self, parser_container, namespace, values: Any, option_string=None): if values.suffix", "icon\" ) if sys.version_info >= (3, 9): parser.add_argument( \"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite if", "argument\" ) # Adapted from https://github.com/mhammond/pywin32/blob/main/com/win32comext/shell/demos/create_link.py # noinspection PyTypeChecker shellLink = pythoncom.CoCreateInstance( shell.CLSID_ShellLink,", "argument\") programsPath = Path(appDataPath) / \"Microsoft\" / \"Windows\" / \"Start Menu\" / \"Programs\"", "for desired icon\" ) if sys.version_info >= (3, 9): parser.add_argument( \"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction,", "= Path(appDataPath) / \"Microsoft\" / \"Windows\" / \"Start Menu\" / \"Programs\" shellLinkPath =", "image file for desired icon\" ) if sys.version_info >= (3, 9): parser.add_argument( \"--overwrite\",", "str = os.getenv(\"APPDATA\"), ): # See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath is None: # pragma:", "parser.add_argument(\"--name\", \"-n\", type=str, required=True, help=\"Display name on notification\") parser.add_argument( \"--icon\", \"-i\", type=Path, required=False,", "pragma: no cover raise RuntimeError(\"Couldn't find APPDATA path. 
Please rerun this script with", "None, overwrite: bool = False, appDataPath: str = os.getenv(\"APPDATA\"), ): # See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594", "from pathlib import Path from typing import Any, Optional try: import pythoncom from", "\"Start Menu\" / \"Programs\" shellLinkPath = programsPath / f\"{appName}.lnk\" linkExists = shellLinkPath.exists() if", "Please rerun this script with the --appdata argument\") programsPath = Path(appDataPath) / \"Microsoft\"", "parser.add_argument( \"--icon\", \"-i\", type=Path, required=False, action=IconFileAction, help=\"Path to image file for desired icon\"", "overwriting existing link...\") else: sys.exit( f\"Link '{shellLinkPath}' already exists. To overwrite, rerun this", "with the AUMI '{appId}'\") if __name__ == \"__main__\": # pragma: no cover parser", "if iconPath is not None: shellLink.SetIconLocation(str(iconPath.resolve()), 0) # Set AUMI to supplied argument", "from typing import Any, Optional try: import pythoncom from win32com.propsys import propsys from", "win32com.shell import shell except ImportError: raise ImportError( \"pywin32 is required to run create_shell_link.py.To", "parser.add_argument(\"--app_id\", \"-a\", type=str, required=True, help=\"Application User Model ID for identification\") parser.add_argument(\"--name\", \"-n\", type=str,", "\"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite if a link already exists\" ) else: parser.add_argument( \"--overwrite\",", "file for desired icon\" ) if sys.version_info >= (3, 9): parser.add_argument( \"--overwrite\", \"-o\",", "if overwrite: print(\"Script run with --overwrite, overwriting existing link...\") else: sys.exit( f\"Link '{shellLinkPath}'", "appDataPath is None: # pragma: no cover raise RuntimeError(\"Couldn't find APPDATA path. 
Please", "os.getenv(\"APPDATA\"), ): # See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath is None: # pragma: no cover", "= False, appDataPath: str = os.getenv(\"APPDATA\"), ): # See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath is", "ValueError(\"The supplied icon file is not of type .ico.\") setattr(namespace, self.dest, values) #", "if linkExists else 'created'} shell link with the AUMI '{appId}'\") if __name__ ==", "print(\"Script run with --overwrite, overwriting existing link...\") else: sys.exit( f\"Link '{shellLinkPath}' already exists.", "required=True, help=\"Application User Model ID for identification\") parser.add_argument(\"--name\", \"-n\", type=str, required=True, help=\"Display name", "# Set AUMI to supplied argument propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey,", "cover if overwrite: print(\"Script run with --overwrite, overwriting existing link...\") else: sys.exit( f\"Link", "'pip install pywin32' in a terminal\" ) class IconFileAction(argparse.Action): # pragma: no cover", "# noinspection PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully {'modified' if linkExists else 'created'} shell link", "Optional try: import pythoncom from win32com.propsys import propsys from win32com.shell import shell except", "action=IconFileAction, help=\"Path to image file for desired icon\" ) if sys.version_info >= (3,", "type=Path, required=False, action=IconFileAction, help=\"Path to image file for desired icon\" ) if sys.version_info", "\"__main__\": # pragma: no cover parser = argparse.ArgumentParser(description=\"Create shell link for use in", "str, appName: str, iconPath: Optional[Path] = None, overwrite: bool = False, appDataPath: str", "noinspection 
PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully {'modified' if linkExists else 'created'} shell link with", "if sys.version_info >= (3, 9): parser.add_argument( \"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite if a link", "True) print(f\"Successfully {'modified' if linkExists else 'created'} shell link with the AUMI '{appId}'\")", "already exists\" ) args = parser.parse_args() create_shell_link( appId=args.app_id, appName=args.name, iconPath=args.icon, overwrite=args.overwrite, appDataPath=args.appdata )", "link arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") if iconPath is not None: shellLink.SetIconLocation(str(iconPath.resolve()), 0) #", "create_shell_link( appId: str, appName: str, iconPath: Optional[Path] = None, overwrite: bool = False,", "argparse import os import sys from pathlib import Path from typing import Any,", "shellLinkPath = programsPath / f\"{appName}.lnk\" linkExists = shellLinkPath.exists() if linkExists: # pragma: no", "class IconFileAction(argparse.Action): # pragma: no cover def __call__(self, parser_container, namespace, values: Any, option_string=None):", "import pythoncom from win32com.propsys import propsys from win32com.shell import shell except ImportError: raise", "programsPath / f\"{appName}.lnk\" linkExists = shellLinkPath.exists() if linkExists: # pragma: no cover if", "propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit() # Save file # noinspection PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath),", "if __name__ == \"__main__\": # pragma: no cover parser = argparse.ArgumentParser(description=\"Create shell link", "shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") 
if iconPath is not None: shellLink.SetIconLocation(str(iconPath.resolve()), 0) # Set AUMI", "noinspection PyTypeChecker shellLink = pythoncom.CoCreateInstance( shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink ) # Set shell", "supplied argument propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit() # Save", "it\") parser.add_argument(\"--app_id\", \"-a\", type=str, required=True, help=\"Application User Model ID for identification\") parser.add_argument(\"--name\", \"-n\",", "propsys.PROPVARIANTType(appId)) propertyStore.Commit() # Save file # noinspection PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully {'modified' if", "else: parser.add_argument( \"--overwrite\", \"-o\", default=False, action=\"store_true\", help=\"Overwrite if a link already exists\" )", "\"-n\", type=str, required=True, help=\"Display name on notification\") parser.add_argument( \"--icon\", \"-i\", type=Path, required=False, action=IconFileAction,", "sys.exit( f\"Link '{shellLinkPath}' already exists. To overwrite, rerun this script with the --overwrite", "f\"Link '{shellLinkPath}' already exists. 
To overwrite, rerun this script with the --overwrite argument\"", "{'modified' if linkExists else 'created'} shell link with the AUMI '{appId}'\") if __name__", "a terminal\" ) class IconFileAction(argparse.Action): # pragma: no cover def __call__(self, parser_container, namespace,", "shellLink.SetWorkingDirectory(\"\") if iconPath is not None: shellLink.SetIconLocation(str(iconPath.resolve()), 0) # Set AUMI to supplied", "icon file is not of type .ico.\") setattr(namespace, self.dest, values) # noinspection PyUnresolvedReferences", "propsys from win32com.shell import shell except ImportError: raise ImportError( \"pywin32 is required to", "required=True, help=\"Display name on notification\") parser.add_argument( \"--icon\", \"-i\", type=Path, required=False, action=IconFileAction, help=\"Path to", "9): parser.add_argument( \"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite if a link already exists\" ) else:", "APPDATA path. Please rerun this script with the --appdata argument\") programsPath = Path(appDataPath)", "\"--icon\", \"-i\", type=Path, required=False, action=IconFileAction, help=\"Path to image file for desired icon\" )", "overwrite: bool = False, appDataPath: str = os.getenv(\"APPDATA\"), ): # See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if", "type=str, required=True, help=\"Display name on notification\") parser.add_argument( \"--icon\", \"-i\", type=Path, required=False, action=IconFileAction, help=\"Path", "execute 'pip install pywin32' in a terminal\" ) class IconFileAction(argparse.Action): # pragma: no", "\"-a\", type=str, required=True, help=\"Application User Model ID for identification\") parser.add_argument(\"--name\", \"-n\", type=str, required=True,", "python import argparse import os import sys from pathlib import Path from typing", "else 'created'} shell link with the AUMI '{appId}'\") if __name__ == \"__main__\": #", "notifications\") parser.add_argument(\"--appdata\", 
\"-ad\", type=str, required=False, help=\"AppData path if script fails to find it\")", "if script fails to find it\") parser.add_argument(\"--app_id\", \"-a\", type=str, required=True, help=\"Application User Model", "exists\" ) else: parser.add_argument( \"--overwrite\", \"-o\", default=False, action=\"store_true\", help=\"Overwrite if a link already", "import os import sys from pathlib import Path from typing import Any, Optional", "#!/usr/bin/env python import argparse import os import sys from pathlib import Path from", "link...\") else: sys.exit( f\"Link '{shellLinkPath}' already exists. To overwrite, rerun this script with", "in toast notifications\") parser.add_argument(\"--appdata\", \"-ad\", type=str, required=False, help=\"AppData path if script fails to", "type=str, required=True, help=\"Application User Model ID for identification\") parser.add_argument(\"--name\", \"-n\", type=str, required=True, help=\"Display", "iconPath: Optional[Path] = None, overwrite: bool = False, appDataPath: str = os.getenv(\"APPDATA\"), ):", "Set AUMI to supplied argument propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId))", "no cover raise RuntimeError(\"Couldn't find APPDATA path. 
Please rerun this script with the", "/ \"Windows\" / \"Start Menu\" / \"Programs\" shellLinkPath = programsPath / f\"{appName}.lnk\" linkExists", "noinspection PyUnresolvedReferences def create_shell_link( appId: str, appName: str, iconPath: Optional[Path] = None, overwrite:", "ImportError( \"pywin32 is required to run create_shell_link.py.To install, execute 'pip install pywin32' in", "install, execute 'pip install pywin32' in a terminal\" ) class IconFileAction(argparse.Action): # pragma:", "\"--overwrite\", \"-o\", default=False, action=\"store_true\", help=\"Overwrite if a link already exists\" ) args =", "parser.add_argument(\"--appdata\", \"-ad\", type=str, required=False, help=\"AppData path if script fails to find it\") parser.add_argument(\"--app_id\",", "to run create_shell_link.py.To install, execute 'pip install pywin32' in a terminal\" ) class", "__call__(self, parser_container, namespace, values: Any, option_string=None): if values.suffix != \".ico\": raise ValueError(\"The supplied", "IconFileAction(argparse.Action): # pragma: no cover def __call__(self, parser_container, namespace, values: Any, option_string=None): if", "/ \"Microsoft\" / \"Windows\" / \"Start Menu\" / \"Programs\" shellLinkPath = programsPath /", "is required to run create_shell_link.py.To install, execute 'pip install pywin32' in a terminal\"", "str, iconPath: Optional[Path] = None, overwrite: bool = False, appDataPath: str = os.getenv(\"APPDATA\"),", "Model ID for identification\") parser.add_argument(\"--name\", \"-n\", type=str, required=True, help=\"Display name on notification\") parser.add_argument(", "PyTypeChecker shellLink = pythoncom.CoCreateInstance( shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink ) # Set shell link", "find APPDATA path. 
Please rerun this script with the --appdata argument\") programsPath =", "pragma: no cover if overwrite: print(\"Script run with --overwrite, overwriting existing link...\") else:", "\"Microsoft\" / \"Windows\" / \"Start Menu\" / \"Programs\" shellLinkPath = programsPath / f\"{appName}.lnk\"", "propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully {'modified' if linkExists else 'created'} shell link with the AUMI", "\"-o\", default=False, action=\"store_true\", help=\"Overwrite if a link already exists\" ) args = parser.parse_args()", "existing link...\") else: sys.exit( f\"Link '{shellLinkPath}' already exists. To overwrite, rerun this script", "# See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath is None: # pragma: no cover raise RuntimeError(\"Couldn't", "pythoncom.CoCreateInstance( shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink ) # Set shell link arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\")", "to supplied argument propertyStore = shellLink.QueryInterface(propsys.IID_IPropertyStore) propertyKey = propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit() #", ") # Set shell link arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") if iconPath is not", "path. 
Please rerun this script with the --appdata argument\") programsPath = Path(appDataPath) /", "shell except ImportError: raise ImportError( \"pywin32 is required to run create_shell_link.py.To install, execute", "shell link for use in toast notifications\") parser.add_argument(\"--appdata\", \"-ad\", type=str, required=False, help=\"AppData path", "desired icon\" ) if sys.version_info >= (3, 9): parser.add_argument( \"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite", "): # See https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath is None: # pragma: no cover raise", "User Model ID for identification\") parser.add_argument(\"--name\", \"-n\", type=str, required=True, help=\"Display name on notification\")", "no cover def __call__(self, parser_container, namespace, values: Any, option_string=None): if values.suffix != \".ico\":", "overwrite: print(\"Script run with --overwrite, overwriting existing link...\") else: sys.exit( f\"Link '{shellLinkPath}' already", "if appDataPath is None: # pragma: no cover raise RuntimeError(\"Couldn't find APPDATA path.", "run with --overwrite, overwriting existing link...\") else: sys.exit( f\"Link '{shellLinkPath}' already exists. 
To", "Save file # noinspection PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True) print(f\"Successfully {'modified' if linkExists else 'created'}", "file is not of type .ico.\") setattr(namespace, self.dest, values) # noinspection PyUnresolvedReferences def", "None, pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink ) # Set shell link arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") if", "setattr(namespace, self.dest, values) # noinspection PyUnresolvedReferences def create_shell_link( appId: str, appName: str, iconPath:", "linkExists = shellLinkPath.exists() if linkExists: # pragma: no cover if overwrite: print(\"Script run", "= programsPath / f\"{appName}.lnk\" linkExists = shellLinkPath.exists() if linkExists: # pragma: no cover", "try: import pythoncom from win32com.propsys import propsys from win32com.shell import shell except ImportError:", "import propsys from win32com.shell import shell except ImportError: raise ImportError( \"pywin32 is required", "# Set shell link arguments shellLink.SetPath(\"\") shellLink.SetArguments(\"\") shellLink.SetWorkingDirectory(\"\") if iconPath is not None:", "Any, Optional try: import pythoncom from win32com.propsys import propsys from win32com.shell import shell", "already exists. To overwrite, rerun this script with the --overwrite argument\" ) #", "= propsys.PSGetPropertyKeyFromName(\"System.AppUserModel.ID\") propertyStore.SetValue(propertyKey, propsys.PROPVARIANTType(appId)) propertyStore.Commit() # Save file # noinspection PyUnresolvedReferences propertyStore.QueryInterface(pythoncom.IID_IPersistFile).Save(str(shellLinkPath), True)", "exists. 
To overwrite, rerun this script with the --overwrite argument\" ) # Adapted", ") if sys.version_info >= (3, 9): parser.add_argument( \"--overwrite\", \"-o\", action=argparse.BooleanOptionalAction, help=\"Overwrite if a", "name on notification\") parser.add_argument( \"--icon\", \"-i\", type=Path, required=False, action=IconFileAction, help=\"Path to image file", "None: # pragma: no cover raise RuntimeError(\"Couldn't find APPDATA path. Please rerun this", "parser.add_argument( \"--overwrite\", \"-o\", default=False, action=\"store_true\", help=\"Overwrite if a link already exists\" ) args", "--overwrite, overwriting existing link...\") else: sys.exit( f\"Link '{shellLinkPath}' already exists. To overwrite, rerun", "a link already exists\" ) args = parser.parse_args() create_shell_link( appId=args.app_id, appName=args.name, iconPath=args.icon, overwrite=args.overwrite,", "import Path from typing import Any, Optional try: import pythoncom from win32com.propsys import", "Any, option_string=None): if values.suffix != \".ico\": raise ValueError(\"The supplied icon file is not", "https://github.com/mohabouje/WinToast/blob/master/src/wintoastlib.cpp#L594 if appDataPath is None: # pragma: no cover raise RuntimeError(\"Couldn't find APPDATA" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "permissions and # limitations under the License. \"\"\"Instrument mysql to report MySQL queries.", "import Pin, patch import mysql.connector # If not patched yet, you can patch", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "Copyright 2019, OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "mysql specifically patch(mysql=True) # This will report a span with the default settings", "License. # You may obtain a copy of the License at # #", "and # limitations under the License. \"\"\"Instrument mysql to report MySQL queries. ``patch_all``", "found on: https://dev.mysql.com/doc/connector-python/en/ \"\"\" from ...utils.importlib import require_modules # check `mysql-connector` availability required_modules", "# Use a pin to specify metadata related to this connection Pin.override(conn, service='mysql-users')", "by _mysql_connector, is not supported yet. Help on mysql.connector can be found on:", "law or agreed to in writing, software # distributed under the License is", "``patch_all`` will automatically patch your mysql connection to make it work. 
:: #", "settings conn = mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\", port=3306, database=\"test\") cursor = conn.cursor() cursor.execute(\"SELECT 6*7", "the License for the specific language governing permissions and # limitations under the", "AS the_answer;\") # Use a pin to specify metadata related to this connection", "# limitations under the License. \"\"\"Instrument mysql to report MySQL queries. ``patch_all`` will", "\"\"\"Instrument mysql to report MySQL queries. ``patch_all`` will automatically patch your mysql connection", "conn = mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\", port=3306, database=\"test\") cursor = conn.cursor() cursor.execute(\"SELECT 6*7 AS", "compliance with the License. # You may obtain a copy of the License", "import mysql.connector # If not patched yet, you can patch mysql specifically patch(mysql=True)", "C connector, provided by _mysql_connector, is not supported yet. Help on mysql.connector can", "...utils.importlib import require_modules # check `mysql-connector` availability required_modules = ['mysql.connector'] with require_modules(required_modules) as", "Authors # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "connector, provided by _mysql_connector, is not supported yet. Help on mysql.connector can be", "is not supported yet. Help on mysql.connector can be found on: https://dev.mysql.com/doc/connector-python/en/ \"\"\"", "_mysql_connector, is not supported yet. Help on mysql.connector can be found on: https://dev.mysql.com/doc/connector-python/en/", "related to this connection Pin.override(conn, service='mysql-users') Only the default full-Python integration works. The", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. 
# You may obtain a", "conn.cursor() cursor.execute(\"SELECT 6*7 AS the_answer;\") # Use a pin to specify metadata related", "as missing_modules: if not missing_modules: from .patch import patch from .tracers import get_traced_mysql_connection", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "mysql connection to make it work. :: # Make sure to import mysql.connector", "on: https://dev.mysql.com/doc/connector-python/en/ \"\"\" from ...utils.importlib import require_modules # check `mysql-connector` availability required_modules =", "password=\"<PASSWORD>\", host=\"localhost\", port=3306, database=\"test\") cursor = conn.cursor() cursor.execute(\"SELECT 6*7 AS the_answer;\") # Use", "access to the patched version from oteltrace import Pin, patch import mysql.connector #", "be found on: https://dev.mysql.com/doc/connector-python/en/ \"\"\" from ...utils.importlib import require_modules # check `mysql-connector` availability", "metadata related to this connection Pin.override(conn, service='mysql-users') Only the default full-Python integration works.", "you can patch mysql specifically patch(mysql=True) # This will report a span with", "2019, OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the", "ANY KIND, either express or implied. # See the License for the specific", "from oteltrace import Pin, patch import mysql.connector # If not patched yet, you", "Use a pin to specify metadata related to this connection Pin.override(conn, service='mysql-users') Only", "= conn.cursor() cursor.execute(\"SELECT 6*7 AS the_answer;\") # Use a pin to specify metadata", "require_modules(required_modules) as missing_modules: if not missing_modules: from .patch import patch from .tracers import", "work. 
:: # Make sure to import mysql.connector and not the 'connect' function,", "from ...utils.importlib import require_modules # check `mysql-connector` availability required_modules = ['mysql.connector'] with require_modules(required_modules)", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "provided by _mysql_connector, is not supported yet. Help on mysql.connector can be found", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "version from oteltrace import Pin, patch import mysql.connector # If not patched yet,", "use this file except in compliance with the License. # You may obtain", "not patched yet, you can patch mysql specifically patch(mysql=True) # This will report", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "cursor.execute(\"SELECT 6*7 AS the_answer;\") # Use a pin to specify metadata related to", "specific language governing permissions and # limitations under the License. \"\"\"Instrument mysql to", "import require_modules # check `mysql-connector` availability required_modules = ['mysql.connector'] with require_modules(required_modules) as missing_modules:", "not use this file except in compliance with the License. # You may", "function, # otherwise you won't have access to the patched version from oteltrace", "to the patched version from oteltrace import Pin, patch import mysql.connector # If", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "if not missing_modules: from .patch import patch from .tracers import get_traced_mysql_connection __all__ =", "a pin to specify metadata related to this connection Pin.override(conn, service='mysql-users') Only the", "not supported yet. 
Help on mysql.connector can be found on: https://dev.mysql.com/doc/connector-python/en/ \"\"\" from", "check `mysql-connector` availability required_modules = ['mysql.connector'] with require_modules(required_modules) as missing_modules: if not missing_modules:", "See the License for the specific language governing permissions and # limitations under", "patch your mysql connection to make it work. :: # Make sure to", "not the 'connect' function, # otherwise you won't have access to the patched", "it work. :: # Make sure to import mysql.connector and not the 'connect'", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "will automatically patch your mysql connection to make it work. :: # Make", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "patched version from oteltrace import Pin, patch import mysql.connector # If not patched", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "database=\"test\") cursor = conn.cursor() cursor.execute(\"SELECT 6*7 AS the_answer;\") # Use a pin to", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "port=3306, database=\"test\") cursor = conn.cursor() cursor.execute(\"SELECT 6*7 AS the_answer;\") # Use a pin", "to import mysql.connector and not the 'connect' function, # otherwise you won't have", "patched yet, you can patch mysql specifically patch(mysql=True) # This will report a", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "'connect' function, # otherwise you won't have access to the patched version from", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "sure to import mysql.connector and not the 'connect' function, # otherwise you won't", "automatically patch your mysql connection to make it work. 
:: # Make sure", "OF ANY KIND, either express or implied. # See the License for the", "OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the \"License\");", "MySQL queries. ``patch_all`` will automatically patch your mysql connection to make it work.", "connection to make it work. :: # Make sure to import mysql.connector and", "this connection Pin.override(conn, service='mysql-users') Only the default full-Python integration works. The binary C", "2.0 (the \"License\"); # you may not use this file except in compliance", "limitations under the License. \"\"\"Instrument mysql to report MySQL queries. ``patch_all`` will automatically", "queries. ``patch_all`` will automatically patch your mysql connection to make it work. ::", "default settings conn = mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\", port=3306, database=\"test\") cursor = conn.cursor() cursor.execute(\"SELECT", "# you may not use this file except in compliance with the License.", "with require_modules(required_modules) as missing_modules: if not missing_modules: from .patch import patch from .tracers", "# otherwise you won't have access to the patched version from oteltrace import", "agreed to in writing, software # distributed under the License is distributed on", ":: # Make sure to import mysql.connector and not the 'connect' function, #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "under the License. \"\"\"Instrument mysql to report MySQL queries. 
``patch_all`` will automatically patch", "Pin, patch import mysql.connector # If not patched yet, you can patch mysql", "`mysql-connector` availability required_modules = ['mysql.connector'] with require_modules(required_modules) as missing_modules: if not missing_modules: from", "(the \"License\"); # you may not use this file except in compliance with", "the 'connect' function, # otherwise you won't have access to the patched version", "Pin.override(conn, service='mysql-users') Only the default full-Python integration works. The binary C connector, provided", "# # Unless required by applicable law or agreed to in writing, software", "service='mysql-users') Only the default full-Python integration works. The binary C connector, provided by", "# Copyright 2019, OpenTelemetry Authors # # Licensed under the Apache License, Version", "supported yet. Help on mysql.connector can be found on: https://dev.mysql.com/doc/connector-python/en/ \"\"\" from ...utils.importlib", "express or implied. # See the License for the specific language governing permissions", "# check `mysql-connector` availability required_modules = ['mysql.connector'] with require_modules(required_modules) as missing_modules: if not", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "otherwise you won't have access to the patched version from oteltrace import Pin,", "except in compliance with the License. # You may obtain a copy of", "span with the default settings conn = mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\", port=3306, database=\"test\") cursor", "https://dev.mysql.com/doc/connector-python/en/ \"\"\" from ...utils.importlib import require_modules # check `mysql-connector` availability required_modules = ['mysql.connector']", "by applicable law or agreed to in writing, software # distributed under the", "yet. 
Help on mysql.connector can be found on: https://dev.mysql.com/doc/connector-python/en/ \"\"\" from ...utils.importlib import", "report MySQL queries. ``patch_all`` will automatically patch your mysql connection to make it", "mysql.connector # If not patched yet, you can patch mysql specifically patch(mysql=True) #", "required_modules = ['mysql.connector'] with require_modules(required_modules) as missing_modules: if not missing_modules: from .patch import", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "# This will report a span with the default settings conn = mysql.connector.connect(user=\"alice\",", "Help on mysql.connector can be found on: https://dev.mysql.com/doc/connector-python/en/ \"\"\" from ...utils.importlib import require_modules", "This will report a span with the default settings conn = mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\",", "the_answer;\") # Use a pin to specify metadata related to this connection Pin.override(conn,", "either express or implied. # See the License for the specific language governing", "= mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\", port=3306, database=\"test\") cursor = conn.cursor() cursor.execute(\"SELECT 6*7 AS the_answer;\")", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "Make sure to import mysql.connector and not the 'connect' function, # otherwise you", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "the License. \"\"\"Instrument mysql to report MySQL queries. ``patch_all`` will automatically patch your", "to report MySQL queries. 
``patch_all`` will automatically patch your mysql connection to make", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "availability required_modules = ['mysql.connector'] with require_modules(required_modules) as missing_modules: if not missing_modules: from .patch", "the specific language governing permissions and # limitations under the License. \"\"\"Instrument mysql", "host=\"localhost\", port=3306, database=\"test\") cursor = conn.cursor() cursor.execute(\"SELECT 6*7 AS the_answer;\") # Use a", "mysql.connector can be found on: https://dev.mysql.com/doc/connector-python/en/ \"\"\" from ...utils.importlib import require_modules # check", "integration works. The binary C connector, provided by _mysql_connector, is not supported yet.", "with the default settings conn = mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\", port=3306, database=\"test\") cursor =", "file except in compliance with the License. # You may obtain a copy", "License. \"\"\"Instrument mysql to report MySQL queries. ``patch_all`` will automatically patch your mysql", "the default full-Python integration works. 
The binary C connector, provided by _mysql_connector, is", "on mysql.connector can be found on: https://dev.mysql.com/doc/connector-python/en/ \"\"\" from ...utils.importlib import require_modules #", "not missing_modules: from .patch import patch from .tracers import get_traced_mysql_connection __all__ = ['get_traced_mysql_connection',", "= ['mysql.connector'] with require_modules(required_modules) as missing_modules: if not missing_modules: from .patch import patch", "missing_modules: if not missing_modules: from .patch import patch from .tracers import get_traced_mysql_connection __all__", "cursor = conn.cursor() cursor.execute(\"SELECT 6*7 AS the_answer;\") # Use a pin to specify", "connection Pin.override(conn, service='mysql-users') Only the default full-Python integration works. The binary C connector,", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "['mysql.connector'] with require_modules(required_modules) as missing_modules: if not missing_modules: from .patch import patch from", "License for the specific language governing permissions and # limitations under the License.", "governing permissions and # limitations under the License. \"\"\"Instrument mysql to report MySQL", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "will report a span with the default settings conn = mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\",", "default full-Python integration works. The binary C connector, provided by _mysql_connector, is not", "can patch mysql specifically patch(mysql=True) # This will report a span with the", "the License. # You may obtain a copy of the License at #", "you won't have access to the patched version from oteltrace import Pin, patch", "your mysql connection to make it work. 
:: # Make sure to import", "require_modules # check `mysql-connector` availability required_modules = ['mysql.connector'] with require_modules(required_modules) as missing_modules: if", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "the patched version from oteltrace import Pin, patch import mysql.connector # If not", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. # See the License for the specific language governing permissions and #", "mysql to report MySQL queries. ``patch_all`` will automatically patch your mysql connection to", "If not patched yet, you can patch mysql specifically patch(mysql=True) # This will", "\"License\"); # you may not use this file except in compliance with the", "# Make sure to import mysql.connector and not the 'connect' function, # otherwise", "mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\", port=3306, database=\"test\") cursor = conn.cursor() cursor.execute(\"SELECT 6*7 AS the_answer;\") #", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "patch(mysql=True) # This will report a span with the default settings conn =", "required by applicable law or agreed to in writing, software # distributed under", "patch mysql specifically patch(mysql=True) # This will report a span with the default", "make it work. :: # Make sure to import mysql.connector and not the", "applicable law or agreed to in writing, software # distributed under the License", "patch import mysql.connector # If not patched yet, you can patch mysql specifically", "binary C connector, provided by _mysql_connector, is not supported yet. 
Help on mysql.connector", "and not the 'connect' function, # otherwise you won't have access to the", "import mysql.connector and not the 'connect' function, # otherwise you won't have access", "a span with the default settings conn = mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\", port=3306, database=\"test\")", "The binary C connector, provided by _mysql_connector, is not supported yet. Help on", "specify metadata related to this connection Pin.override(conn, service='mysql-users') Only the default full-Python integration", "missing_modules: from .patch import patch from .tracers import get_traced_mysql_connection __all__ = ['get_traced_mysql_connection', 'patch']", "for the specific language governing permissions and # limitations under the License. \"\"\"Instrument", "or agreed to in writing, software # distributed under the License is distributed", "or implied. # See the License for the specific language governing permissions and", "won't have access to the patched version from oteltrace import Pin, patch import", "6*7 AS the_answer;\") # Use a pin to specify metadata related to this", "works. The binary C connector, provided by _mysql_connector, is not supported yet. Help", "have access to the patched version from oteltrace import Pin, patch import mysql.connector", "yet, you can patch mysql specifically patch(mysql=True) # This will report a span", "specifically patch(mysql=True) # This will report a span with the default settings conn", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "to make it work. 
:: # Make sure to import mysql.connector and not", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "\"\"\" from ...utils.importlib import require_modules # check `mysql-connector` availability required_modules = ['mysql.connector'] with", "mysql.connector and not the 'connect' function, # otherwise you won't have access to", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "# If not patched yet, you can patch mysql specifically patch(mysql=True) # This", "can be found on: https://dev.mysql.com/doc/connector-python/en/ \"\"\" from ...utils.importlib import require_modules # check `mysql-connector`", "language governing permissions and # limitations under the License. \"\"\"Instrument mysql to report", "pin to specify metadata related to this connection Pin.override(conn, service='mysql-users') Only the default", "full-Python integration works. The binary C connector, provided by _mysql_connector, is not supported", "with the License. # You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "oteltrace import Pin, patch import mysql.connector # If not patched yet, you can", "the default settings conn = mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\", port=3306, database=\"test\") cursor = conn.cursor()", "to specify metadata related to this connection Pin.override(conn, service='mysql-users') Only the default full-Python", "in writing, software # distributed under the License is distributed on an \"AS", "report a span with the default settings conn = mysql.connector.connect(user=\"alice\", password=\"<PASSWORD>\", host=\"localhost\", port=3306,", "Only the default full-Python integration works. 
The binary C connector, provided by _mysql_connector,", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "to this connection Pin.override(conn, service='mysql-users') Only the default full-Python integration works. The binary" ]
[ "# -*- coding: utf-8 -*- __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __version__ =", "-*- __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __version__ = \"1.0.0\" from smartystreets.client import", "= \"<EMAIL>\" __version__ = \"1.0.0\" from smartystreets.client import Client # noqa __all__ =", "-*- coding: utf-8 -*- __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __version__ = \"1.0.0\"", "= \"<NAME>\" __email__ = \"<EMAIL>\" __version__ = \"1.0.0\" from smartystreets.client import Client #", "__email__ = \"<EMAIL>\" __version__ = \"1.0.0\" from smartystreets.client import Client # noqa __all__", "utf-8 -*- __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __version__ = \"1.0.0\" from smartystreets.client", "\"<NAME>\" __email__ = \"<EMAIL>\" __version__ = \"1.0.0\" from smartystreets.client import Client # noqa", "\"<EMAIL>\" __version__ = \"1.0.0\" from smartystreets.client import Client # noqa __all__ = [\"Client\"]", "coding: utf-8 -*- __author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __version__ = \"1.0.0\" from", "__author__ = \"<NAME>\" __email__ = \"<EMAIL>\" __version__ = \"1.0.0\" from smartystreets.client import Client" ]
[ "# Create such a task_id that info is send to SuperAdmins and node", "restarted on a compute node. \"\"\" _name_ = 'node_system_restarted' def __init__(self, node, **kwargs):", "\"\"\" Called from node_sysinfo_cb after erigonesd:fast is restarted on a compute node. \"\"\"", "import Event from que import TT_DUMMY, TG_DC_UNBOUND from que.utils import DEFAULT_DC, task_id_from_string class", "from api.event import Event from que import TT_DUMMY, TG_DC_UNBOUND from que.utils import DEFAULT_DC,", "_name_ = 'node_system_restarted' def __init__(self, node, **kwargs): # Create such a task_id that", "node owner task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=TT_DUMMY, tg=TG_DC_UNBOUND) kwargs['node_hostname'] = node.hostname super(NodeSystemRestarted,", "TG_DC_UNBOUND from que.utils import DEFAULT_DC, task_id_from_string class NodeSystemRestarted(Event): \"\"\" Called from node_sysinfo_cb after", "from node_sysinfo_cb after erigonesd:fast is restarted on a compute node. 
\"\"\" _name_ =", "Event from que import TT_DUMMY, TG_DC_UNBOUND from que.utils import DEFAULT_DC, task_id_from_string class NodeSystemRestarted(Event):", "and node owner task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=TT_DUMMY, tg=TG_DC_UNBOUND) kwargs['node_hostname'] = node.hostname", "= 'node_system_restarted' def __init__(self, node, **kwargs): # Create such a task_id that info", "def __init__(self, node, **kwargs): # Create such a task_id that info is send", "that info is send to SuperAdmins and node owner task_id = task_id_from_string(node.owner.id, dummy=True,", "info is send to SuperAdmins and node owner task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC,", "SuperAdmins and node owner task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=TT_DUMMY, tg=TG_DC_UNBOUND) kwargs['node_hostname'] =", "owner task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=TT_DUMMY, tg=TG_DC_UNBOUND) kwargs['node_hostname'] = node.hostname super(NodeSystemRestarted, self).__init__(task_id,", "from que import TT_DUMMY, TG_DC_UNBOUND from que.utils import DEFAULT_DC, task_id_from_string class NodeSystemRestarted(Event): \"\"\"", "NodeSystemRestarted(Event): \"\"\" Called from node_sysinfo_cb after erigonesd:fast is restarted on a compute node.", "after erigonesd:fast is restarted on a compute node. \"\"\" _name_ = 'node_system_restarted' def", "'node_system_restarted' def __init__(self, node, **kwargs): # Create such a task_id that info is", "such a task_id that info is send to SuperAdmins and node owner task_id", "a compute node. 
\"\"\" _name_ = 'node_system_restarted' def __init__(self, node, **kwargs): # Create", "api.event import Event from que import TT_DUMMY, TG_DC_UNBOUND from que.utils import DEFAULT_DC, task_id_from_string", "__init__(self, node, **kwargs): # Create such a task_id that info is send to", "task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=TT_DUMMY, tg=TG_DC_UNBOUND) kwargs['node_hostname'] = node.hostname super(NodeSystemRestarted, self).__init__(task_id, **kwargs)", "\"\"\" _name_ = 'node_system_restarted' def __init__(self, node, **kwargs): # Create such a task_id", "que import TT_DUMMY, TG_DC_UNBOUND from que.utils import DEFAULT_DC, task_id_from_string class NodeSystemRestarted(Event): \"\"\" Called", "**kwargs): # Create such a task_id that info is send to SuperAdmins and", "node. \"\"\" _name_ = 'node_system_restarted' def __init__(self, node, **kwargs): # Create such a", "Create such a task_id that info is send to SuperAdmins and node owner", "is restarted on a compute node. \"\"\" _name_ = 'node_system_restarted' def __init__(self, node,", "from que.utils import DEFAULT_DC, task_id_from_string class NodeSystemRestarted(Event): \"\"\" Called from node_sysinfo_cb after erigonesd:fast", "on a compute node. \"\"\" _name_ = 'node_system_restarted' def __init__(self, node, **kwargs): #", "task_id_from_string class NodeSystemRestarted(Event): \"\"\" Called from node_sysinfo_cb after erigonesd:fast is restarted on a", "a task_id that info is send to SuperAdmins and node owner task_id =", "compute node. \"\"\" _name_ = 'node_system_restarted' def __init__(self, node, **kwargs): # Create such", "import TT_DUMMY, TG_DC_UNBOUND from que.utils import DEFAULT_DC, task_id_from_string class NodeSystemRestarted(Event): \"\"\" Called from", "Called from node_sysinfo_cb after erigonesd:fast is restarted on a compute node. 
\"\"\" _name_", "task_id that info is send to SuperAdmins and node owner task_id = task_id_from_string(node.owner.id,", "is send to SuperAdmins and node owner task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=TT_DUMMY,", "que.utils import DEFAULT_DC, task_id_from_string class NodeSystemRestarted(Event): \"\"\" Called from node_sysinfo_cb after erigonesd:fast is", "class NodeSystemRestarted(Event): \"\"\" Called from node_sysinfo_cb after erigonesd:fast is restarted on a compute", "TT_DUMMY, TG_DC_UNBOUND from que.utils import DEFAULT_DC, task_id_from_string class NodeSystemRestarted(Event): \"\"\" Called from node_sysinfo_cb", "import DEFAULT_DC, task_id_from_string class NodeSystemRestarted(Event): \"\"\" Called from node_sysinfo_cb after erigonesd:fast is restarted", "DEFAULT_DC, task_id_from_string class NodeSystemRestarted(Event): \"\"\" Called from node_sysinfo_cb after erigonesd:fast is restarted on", "to SuperAdmins and node owner task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=TT_DUMMY, tg=TG_DC_UNBOUND) kwargs['node_hostname']", "send to SuperAdmins and node owner task_id = task_id_from_string(node.owner.id, dummy=True, dc_id=DEFAULT_DC, tt=TT_DUMMY, tg=TG_DC_UNBOUND)", "erigonesd:fast is restarted on a compute node. \"\"\" _name_ = 'node_system_restarted' def __init__(self,", "node, **kwargs): # Create such a task_id that info is send to SuperAdmins", "node_sysinfo_cb after erigonesd:fast is restarted on a compute node. \"\"\" _name_ = 'node_system_restarted'" ]
[ "result: if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if len(part.body) > 1: # We use a hack", "%s', workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason']) self.workerManager = WorkerManager(ProxyTask(), stats) def __delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket,", "workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason']) self.workerManager = WorkerManager(ProxyTask(), stats) def __delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def", "from lib.stats import LambdaStatsModel, S3StatsModel from lib.workers import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager logger =", "name for a ' 'long lived proxy') self.__verbose = verbose self.__s3Bucket = s3Bucket", "is not None: if 's3' not in stats.models: stats.register_model('s3', S3StatsModel()) self.__s3Stats = stats.get_model('s3')", "logger.info('Starting worker: %d', workerId) workerArgs['longLived'] = True if s3Bucket: workerArgs['s3Bucket'] = s3Bucket def", "workerId, workerResponse): if workerResponse is not None: logger.info('Worker %d ran for %dms and", "self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret = result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3, key) self.__s3Stats.record_get(len(ret)) return ret def request(self, method,", "workerResponse): if workerResponse is not None: logger.info('Worker %d ran for %dms and proxied", "not in stats.models: stats.register_model('s3', S3StatsModel()) self.__s3Stats = stats.get_model('s3') self.__s3 = boto3.client('s3') self.__s3DeletePool =", "self.__load_object_from_s3(key) elif result.has_attribute('data'): content = result.get_binary_attribute('data') else: content = b'' statusCode = payload['statusCode']", "= self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret = result['Body'].read() 
self.__s3DeletePool.submit(self.__delete_object_from_s3, key) self.__s3Stats.record_get(len(ret)) return ret def request(self,", "and S3, and be error prone. if len(functions) > 1 and 'arn:' in", "# Single message payload = json.loads(b64decode(result.body).decode('zlib')) if result.has_attribute('s3'): key = result.get_string_attribute('s3') content =", "this across regions is not a priority since that would # incur costs", "'requests: Exit reason: %s', workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason']) self.workerManager = WorkerManager(ProxyTask(), stats) def", "> 1: # We use a hack to send practically empty bodies decodedBody", "is not None: logger.info('Worker %d ran for %dms and proxied %d ' 'requests:", "data: task.add_binary_attribute('data', data) task.set_body(json.dumps({ 'method': method, 'url': url, 'headers': headers })) result =", "@property def lambda_function(self): return random.choice(functions) @property def max_workers(self): return maxLambdas @property def load_factor(self):", "incur costs for SQS and S3, and be error prone. 
if len(functions) >", "since that would # incur costs for SQS and S3, and be error", "not None: if 's3' not in stats.models: stats.register_model('s3', S3StatsModel()) self.__s3Stats = stats.get_model('s3') self.__s3", "result.has_attribute('s3'): key = result.get_string_attribute('s3') content = self.__load_object_from_s3(key) elif result.has_attribute('data'): content = result.get_binary_attribute('data') else:", "workerResponse['numRequestsProxied'], workerResponse['exitReason']) self.workerManager = WorkerManager(ProxyTask(), stats) def __delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def __load_object_from_s3(self,", "function that queues requests in SQS\"\"\" def __init__(self, functions, maxLambdas, s3Bucket, stats, verbose):", "empty bodies decodedBody = b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content = b''.join(dataChunks) else: # Single message", "len(functions) > 1 and 'arn:' in functions[0]: raise NotImplementedError( 'Only a single function", "Key=key) def __load_object_from_s3(self, key): result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret = result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3, key)", "self.__s3DeletePool = ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig): @property def queue_prefix(self): return 'lambda-proxy' @property def lambda_function(self):", "def post_return_callback(self, workerId, workerResponse): if workerResponse is not None: logger.info('Worker %d ran for", "stats.models: stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats = stats.get_model('lambda') if s3Bucket is not None: if 's3'", "ret def request(self, method, url, headers, data): task = LambdaSqsTask() if data: task.add_binary_attribute('data',", "= self.__load_object_from_s3(key) elif result.has_attribute('data'): content = result.get_binary_attribute('data') else: content = b'' statusCode =", "import json import 
logging from base64 import b64decode from random import SystemRandom from", "self.__lambdaStats = stats.get_model('lambda') if s3Bucket is not None: if 's3' not in stats.models:", "queues requests in SQS\"\"\" def __init__(self, functions, maxLambdas, s3Bucket, stats, verbose): # Supporting", "boto3 import json import logging from base64 import b64decode from random import SystemRandom", "ret = result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3, key) self.__s3Stats.record_get(len(ret)) return ret def request(self, method, url, headers,", "headers })) result = self.workerManager.execute(task, timeout=10) if result is None: return ProxyResponse(statusCode=500, headers={},", "Exit reason: %s', workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason']) self.workerManager = WorkerManager(ProxyTask(), stats) def __delete_object_from_s3(self,", "not in stats.models: stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats = stats.get_model('lambda') if s3Bucket is not None:", "base64 import b64decode from random import SystemRandom from concurrent.futures import ThreadPoolExecutor from lib.proxy", "return ret def request(self, method, url, headers, data): task = LambdaSqsTask() if data:", "priority since that would # incur costs for SQS and S3, and be", "'arn:' in functions[0]: raise NotImplementedError( 'Only a single function may be specified by", "url, headers, data): task = LambdaSqsTask() if data: task.add_binary_attribute('data', data) task.set_body(json.dumps({ 'method': method,", "= stats.get_model('s3') self.__s3 = boto3.client('s3') self.__s3DeletePool = ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig): @property def queue_prefix(self):", "result.get_binary_attribute('data') else: content = b'' statusCode = payload['statusCode'] responseHeaders = payload['headers'] return ProxyResponse(statusCode=statusCode,", "key): 
self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def __load_object_from_s3(self, key): result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret = result['Body'].read()", "self.__s3 = boto3.client('s3') self.__s3DeletePool = ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig): @property def queue_prefix(self): return 'lambda-proxy'", "'url': url, 'headers': headers })) result = self.workerManager.execute(task, timeout=10) if result is None:", "data): task = LambdaSqsTask() if data: task.add_binary_attribute('data', data) task.set_body(json.dumps({ 'method': method, 'url': url,", "'s3' not in stats.models: stats.register_model('s3', S3StatsModel()) self.__s3Stats = stats.get_model('s3') self.__s3 = boto3.client('s3') self.__s3DeletePool", "maxLambdas, s3Bucket, stats, verbose): # Supporting this across regions is not a priority", "= s3Bucket def post_return_callback(self, workerId, workerResponse): if workerResponse is not None: logger.info('Worker %d", "return maxLambdas @property def load_factor(self): return 4 def pre_invoke_callback(self, workerId, workerArgs): logger.info('Starting worker:", "part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if len(part.body) > 1: # We use a hack to send", "'lambda' not in stats.models: stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats = stats.get_model('lambda') if s3Bucket is not", "def __init__(self, functions, maxLambdas, s3Bucket, stats, verbose): # Supporting this across regions is", "result is None: return ProxyResponse(statusCode=500, headers={}, content='') if type(result) is list: # Fragmented", "= boto3.client('s3') self.__s3DeletePool = ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig): @property def queue_prefix(self): return 'lambda-proxy' @property", "SystemRandom() class LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return a function that queues requests in SQS\"\"\" def __init__(self,", "stats.get_model('lambda') if s3Bucket 
is not None: if 's3' not in stats.models: stats.register_model('s3', S3StatsModel())", "specified by name for a ' 'long lived proxy') self.__verbose = verbose self.__s3Bucket", "s3Bucket if 'lambda' not in stats.models: stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats = stats.get_model('lambda') if s3Bucket", "key) self.__s3Stats.record_get(len(ret)) return ret def request(self, method, url, headers, data): task = LambdaSqsTask()", "b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content = b''.join(dataChunks) else: # Single message payload = json.loads(b64decode(result.body).decode('zlib')) if", "def lambda_function(self): return random.choice(functions) @property def max_workers(self): return maxLambdas @property def load_factor(self): return", "post_return_callback(self, workerId, workerResponse): if workerResponse is not None: logger.info('Worker %d ran for %dms", "timeout=10) if result is None: return ProxyResponse(statusCode=500, headers={}, content='') if type(result) is list:", "payload = json.loads(b64decode(result.body).decode('zlib')) if result.has_attribute('s3'): key = result.get_string_attribute('s3') content = self.__load_object_from_s3(key) elif result.has_attribute('data'):", "= [] for part in result: if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if len(part.body) > 1:", "self.__s3Stats.record_get(len(ret)) return ret def request(self, method, url, headers, data): task = LambdaSqsTask() if", "> 1 and 'arn:' in functions[0]: raise NotImplementedError( 'Only a single function may", "for %dms and proxied %d ' 'requests: Exit reason: %s', workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'],", "__init__(self, functions, maxLambdas, s3Bucket, stats, verbose): # Supporting this across regions is not", "content = b''.join(dataChunks) else: # Single message payload = json.loads(b64decode(result.body).decode('zlib')) if 
result.has_attribute('s3'): key", "return random.choice(functions) @property def max_workers(self): return maxLambdas @property def load_factor(self): return 4 def", "S3StatsModel()) self.__s3Stats = stats.get_model('s3') self.__s3 = boto3.client('s3') self.__s3DeletePool = ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig): @property", "workerResponse is not None: logger.info('Worker %d ran for %dms and proxied %d '", "= json.loads(b64decode(result.body).decode('zlib')) if result.has_attribute('s3'): key = result.get_string_attribute('s3') content = self.__load_object_from_s3(key) elif result.has_attribute('data'): content", "if 'lambda' not in stats.models: stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats = stats.get_model('lambda') if s3Bucket is", "'headers': headers })) result = self.workerManager.execute(task, timeout=10) if result is None: return ProxyResponse(statusCode=500,", "' 'long lived proxy') self.__verbose = verbose self.__s3Bucket = s3Bucket if 'lambda' not", "bodies decodedBody = b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content = b''.join(dataChunks) else: # Single message payload", "if data: task.add_binary_attribute('data', data) task.set_body(json.dumps({ 'method': method, 'url': url, 'headers': headers })) result", "import ThreadPoolExecutor from lib.proxy import AbstractRequestProxy, ProxyResponse from lib.stats import LambdaStatsModel, S3StatsModel from", "and proxied %d ' 'requests: Exit reason: %s', workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason']) self.workerManager", "random.choice(functions) @property def max_workers(self): return maxLambdas @property def load_factor(self): return 4 def pre_invoke_callback(self,", "reason: %s', workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason']) self.workerManager = WorkerManager(ProxyTask(), stats) def 
__delete_object_from_s3(self, key):", "not a priority since that would # incur costs for SQS and S3,", "from lib.workers import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager logger = logging.getLogger(__name__) random = SystemRandom() class", "def load_factor(self): return 4 def pre_invoke_callback(self, workerId, workerArgs): logger.info('Starting worker: %d', workerId) workerArgs['longLived']", "ProxyResponse(statusCode=500, headers={}, content='') if type(result) is list: # Fragmented response payload = {}", "if type(result) is list: # Fragmented response payload = {} dataChunks = []", "part in result: if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if len(part.body) > 1: # We use", "by name for a ' 'long lived proxy') self.__verbose = verbose self.__s3Bucket =", "%d', workerId) workerArgs['longLived'] = True if s3Bucket: workerArgs['s3Bucket'] = s3Bucket def post_return_callback(self, workerId,", "headers={}, content='') if type(result) is list: # Fragmented response payload = {} dataChunks", "hack to send practically empty bodies decodedBody = b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content = b''.join(dataChunks)", "from random import SystemRandom from concurrent.futures import ThreadPoolExecutor from lib.proxy import AbstractRequestProxy, ProxyResponse", "json import logging from base64 import b64decode from random import SystemRandom from concurrent.futures", "'method': method, 'url': url, 'headers': headers })) result = self.workerManager.execute(task, timeout=10) if result", "from concurrent.futures import ThreadPoolExecutor from lib.proxy import AbstractRequestProxy, ProxyResponse from lib.stats import LambdaStatsModel,", "workerResponse['exitReason']) self.workerManager = WorkerManager(ProxyTask(), stats) def __delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def __load_object_from_s3(self, key):", "stats) def 
__delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def __load_object_from_s3(self, key): result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key)", "b64decode from random import SystemRandom from concurrent.futures import ThreadPoolExecutor from lib.proxy import AbstractRequestProxy,", "self.__s3Stats = stats.get_model('s3') self.__s3 = boto3.client('s3') self.__s3DeletePool = ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig): @property def", "result = self.workerManager.execute(task, timeout=10) if result is None: return ProxyResponse(statusCode=500, headers={}, content='') if", "LambdaSqsTask() if data: task.add_binary_attribute('data', data) task.set_body(json.dumps({ 'method': method, 'url': url, 'headers': headers }))", "def queue_prefix(self): return 'lambda-proxy' @property def lambda_function(self): return random.choice(functions) @property def max_workers(self): return", "= logging.getLogger(__name__) random = SystemRandom() class LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return a function that queues requests", "[] for part in result: if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if len(part.body) > 1: #", "raise NotImplementedError( 'Only a single function may be specified by name for a", "message payload = json.loads(b64decode(result.body).decode('zlib')) if result.has_attribute('s3'): key = result.get_string_attribute('s3') content = self.__load_object_from_s3(key) elif", "a ' 'long lived proxy') self.__verbose = verbose self.__s3Bucket = s3Bucket if 'lambda'", "return ProxyResponse(statusCode=500, headers={}, content='') if type(result) is list: # Fragmented response payload =", "= stats.get_model('lambda') if s3Bucket is not None: if 's3' not in stats.models: stats.register_model('s3',", "proxied %d ' 'requests: Exit reason: %s', workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason']) 
self.workerManager =", "ProxyResponse from lib.stats import LambdaStatsModel, S3StatsModel from lib.workers import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager logger", "__delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def __load_object_from_s3(self, key): result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret =", "would # incur costs for SQS and S3, and be error prone. if", "lived proxy') self.__verbose = verbose self.__s3Bucket = s3Bucket if 'lambda' not in stats.models:", "practically empty bodies decodedBody = b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content = b''.join(dataChunks) else: # Single", "payload.update(json.loads(decodedBody)) content = b''.join(dataChunks) else: # Single message payload = json.loads(b64decode(result.body).decode('zlib')) if result.has_attribute('s3'):", "= b''.join(dataChunks) else: # Single message payload = json.loads(b64decode(result.body).decode('zlib')) if result.has_attribute('s3'): key =", "WorkerManager(ProxyTask(), stats) def __delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def __load_object_from_s3(self, key): result = self.__s3.get_object(Bucket=self.__s3Bucket,", "json.loads(b64decode(result.body).decode('zlib')) if result.has_attribute('s3'): key = result.get_string_attribute('s3') content = self.__load_object_from_s3(key) elif result.has_attribute('data'): content =", "AbstractRequestProxy, ProxyResponse from lib.stats import LambdaStatsModel, S3StatsModel from lib.workers import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager", "concurrent.futures import ThreadPoolExecutor from lib.proxy import AbstractRequestProxy, ProxyResponse from lib.stats import LambdaStatsModel, S3StatsModel", "verbose): # Supporting this across regions is not a priority since that would", "import SystemRandom from concurrent.futures import ThreadPoolExecutor from lib.proxy import AbstractRequestProxy, 
ProxyResponse from lib.stats", "%d ran for %dms and proxied %d ' 'requests: Exit reason: %s', workerId,", "if result is None: return ProxyResponse(statusCode=500, headers={}, content='') if type(result) is list: #", "s3Bucket: workerArgs['s3Bucket'] = s3Bucket def post_return_callback(self, workerId, workerResponse): if workerResponse is not None:", "list: # Fragmented response payload = {} dataChunks = [] for part in", "prone. if len(functions) > 1 and 'arn:' in functions[0]: raise NotImplementedError( 'Only a", "'long lived proxy') self.__verbose = verbose self.__s3Bucket = s3Bucket if 'lambda' not in", "lib.stats import LambdaStatsModel, S3StatsModel from lib.workers import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager logger = logging.getLogger(__name__)", "import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager logger = logging.getLogger(__name__) random = SystemRandom() class LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return", "'Only a single function may be specified by name for a ' 'long", "4 def pre_invoke_callback(self, workerId, workerArgs): logger.info('Starting worker: %d', workerId) workerArgs['longLived'] = True if", "request(self, method, url, headers, data): task = LambdaSqsTask() if data: task.add_binary_attribute('data', data) task.set_body(json.dumps({", "worker: %d', workerId) workerArgs['longLived'] = True if s3Bucket: workerArgs['s3Bucket'] = s3Bucket def post_return_callback(self,", "@property def load_factor(self): return 4 def pre_invoke_callback(self, workerId, workerArgs): logger.info('Starting worker: %d', workerId)", "decodedBody = b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content = b''.join(dataChunks) else: # Single message payload =", "if s3Bucket is not None: if 's3' not in stats.models: stats.register_model('s3', S3StatsModel()) self.__s3Stats", "workerArgs['longLived'] = True if s3Bucket: workerArgs['s3Bucket'] = s3Bucket def post_return_callback(self, workerId, workerResponse): if", 
"a function that queues requests in SQS\"\"\" def __init__(self, functions, maxLambdas, s3Bucket, stats,", "if 's3' not in stats.models: stats.register_model('s3', S3StatsModel()) self.__s3Stats = stats.get_model('s3') self.__s3 = boto3.client('s3')", "be error prone. if len(functions) > 1 and 'arn:' in functions[0]: raise NotImplementedError(", "a hack to send practically empty bodies decodedBody = b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content =", "costs for SQS and S3, and be error prone. if len(functions) > 1", "stats.register_model('s3', S3StatsModel()) self.__s3Stats = stats.get_model('s3') self.__s3 = boto3.client('s3') self.__s3DeletePool = ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig):", "class ProxyTask(LambdaSqsTaskConfig): @property def queue_prefix(self): return 'lambda-proxy' @property def lambda_function(self): return random.choice(functions) @property", "= b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content = b''.join(dataChunks) else: # Single message payload = json.loads(b64decode(result.body).decode('zlib'))", "queue_prefix(self): return 'lambda-proxy' @property def lambda_function(self): return random.choice(functions) @property def max_workers(self): return maxLambdas", "functions[0]: raise NotImplementedError( 'Only a single function may be specified by name for", "else: # Single message payload = json.loads(b64decode(result.body).decode('zlib')) if result.has_attribute('s3'): key = result.get_string_attribute('s3') content", "None: logger.info('Worker %d ran for %dms and proxied %d ' 'requests: Exit reason:", "stats.models: stats.register_model('s3', S3StatsModel()) self.__s3Stats = stats.get_model('s3') self.__s3 = boto3.client('s3') self.__s3DeletePool = ThreadPoolExecutor(1) class", "functions, maxLambdas, s3Bucket, stats, verbose): # Supporting this across regions is not a", "1: # We use a hack to send practically empty bodies decodedBody =", "content = b'' 
statusCode = payload['statusCode'] responseHeaders = payload['headers'] return ProxyResponse(statusCode=statusCode, headers=responseHeaders, content=content)", "= LambdaSqsTask() if data: task.add_binary_attribute('data', data) task.set_body(json.dumps({ 'method': method, 'url': url, 'headers': headers", "use a hack to send practically empty bodies decodedBody = b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content", "return 'lambda-proxy' @property def lambda_function(self): return random.choice(functions) @property def max_workers(self): return maxLambdas @property", "= ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig): @property def queue_prefix(self): return 'lambda-proxy' @property def lambda_function(self): return", "task = LambdaSqsTask() if data: task.add_binary_attribute('data', data) task.set_body(json.dumps({ 'method': method, 'url': url, 'headers':", "None: return ProxyResponse(statusCode=500, headers={}, content='') if type(result) is list: # Fragmented response payload", "in stats.models: stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats = stats.get_model('lambda') if s3Bucket is not None: if", "= s3Bucket if 'lambda' not in stats.models: stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats = stats.get_model('lambda') if", "stats, verbose): # Supporting this across regions is not a priority since that", "LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return a function that queues requests in SQS\"\"\" def __init__(self, functions, maxLambdas,", "= WorkerManager(ProxyTask(), stats) def __delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def __load_object_from_s3(self, key): result =", "ProxyTask(LambdaSqsTaskConfig): @property def queue_prefix(self): return 'lambda-proxy' @property def lambda_function(self): return random.choice(functions) @property def", "logging.getLogger(__name__) random = SystemRandom() class 
LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return a function that queues requests in", "S3, and be error prone. if len(functions) > 1 and 'arn:' in functions[0]:", "s3Bucket, stats, verbose): # Supporting this across regions is not a priority since", "= True if s3Bucket: workerArgs['s3Bucket'] = s3Bucket def post_return_callback(self, workerId, workerResponse): if workerResponse", "len(part.body) > 1: # We use a hack to send practically empty bodies", "= self.workerManager.execute(task, timeout=10) if result is None: return ProxyResponse(statusCode=500, headers={}, content='') if type(result)", "= {} dataChunks = [] for part in result: if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if", "s3Bucket def post_return_callback(self, workerId, workerResponse): if workerResponse is not None: logger.info('Worker %d ran", "self.__s3DeletePool.submit(self.__delete_object_from_s3, key) self.__s3Stats.record_get(len(ret)) return ret def request(self, method, url, headers, data): task =", "result.has_attribute('data'): content = result.get_binary_attribute('data') else: content = b'' statusCode = payload['statusCode'] responseHeaders =", "= verbose self.__s3Bucket = s3Bucket if 'lambda' not in stats.models: stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats", "method, url, headers, data): task = LambdaSqsTask() if data: task.add_binary_attribute('data', data) task.set_body(json.dumps({ 'method':", "LambdaStatsModel, S3StatsModel from lib.workers import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager logger = logging.getLogger(__name__) random =", "workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason']) self.workerManager = WorkerManager(ProxyTask(), stats) def __delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key)", "elif result.has_attribute('data'): content = result.get_binary_attribute('data') else: content = b'' 
statusCode = payload['statusCode'] responseHeaders", "in stats.models: stats.register_model('s3', S3StatsModel()) self.__s3Stats = stats.get_model('s3') self.__s3 = boto3.client('s3') self.__s3DeletePool = ThreadPoolExecutor(1)", "NotImplementedError( 'Only a single function may be specified by name for a '", "%d ' 'requests: Exit reason: %s', workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason']) self.workerManager = WorkerManager(ProxyTask(),", "for part in result: if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if len(part.body) > 1: # We", "that queues requests in SQS\"\"\" def __init__(self, functions, maxLambdas, s3Bucket, stats, verbose): #", "verbose self.__s3Bucket = s3Bucket if 'lambda' not in stats.models: stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats =", "if workerResponse is not None: logger.info('Worker %d ran for %dms and proxied %d", "requests in SQS\"\"\" def __init__(self, functions, maxLambdas, s3Bucket, stats, verbose): # Supporting this", "1 and 'arn:' in functions[0]: raise NotImplementedError( 'Only a single function may be", "task.set_body(json.dumps({ 'method': method, 'url': url, 'headers': headers })) result = self.workerManager.execute(task, timeout=10) if", "is not a priority since that would # incur costs for SQS and", "from lib.proxy import AbstractRequestProxy, ProxyResponse from lib.stats import LambdaStatsModel, S3StatsModel from lib.workers import", "Key=key) ret = result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3, key) self.__s3Stats.record_get(len(ret)) return ret def request(self, method, url,", "content = result.get_binary_attribute('data') else: content = b'' statusCode = payload['statusCode'] responseHeaders = payload['headers']", "self.__verbose = verbose self.__s3Bucket = s3Bucket if 'lambda' not in stats.models: stats.register_model('lambda', LambdaStatsModel())", "'lambda-proxy' 
@property def lambda_function(self): return random.choice(functions) @property def max_workers(self): return maxLambdas @property def", "import logging from base64 import b64decode from random import SystemRandom from concurrent.futures import", "True if s3Bucket: workerArgs['s3Bucket'] = s3Bucket def post_return_callback(self, workerId, workerResponse): if workerResponse is", "Single message payload = json.loads(b64decode(result.body).decode('zlib')) if result.has_attribute('s3'): key = result.get_string_attribute('s3') content = self.__load_object_from_s3(key)", "= result.get_string_attribute('s3') content = self.__load_object_from_s3(key) elif result.has_attribute('data'): content = result.get_binary_attribute('data') else: content =", "s3Bucket is not None: if 's3' not in stats.models: stats.register_model('s3', S3StatsModel()) self.__s3Stats =", "stats.get_model('s3') self.__s3 = boto3.client('s3') self.__s3DeletePool = ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig): @property def queue_prefix(self): return", "in SQS\"\"\" def __init__(self, functions, maxLambdas, s3Bucket, stats, verbose): # Supporting this across", "# We use a hack to send practically empty bodies decodedBody = b64decode(part.body).decode('zlib')", "and 'arn:' in functions[0]: raise NotImplementedError( 'Only a single function may be specified", "if s3Bucket: workerArgs['s3Bucket'] = s3Bucket def post_return_callback(self, workerId, workerResponse): if workerResponse is not", "else: content = b'' statusCode = payload['statusCode'] responseHeaders = payload['headers'] return ProxyResponse(statusCode=statusCode, headers=responseHeaders,", "# Fragmented response payload = {} dataChunks = [] for part in result:", "error prone. 
if len(functions) > 1 and 'arn:' in functions[0]: raise NotImplementedError( 'Only", "import b64decode from random import SystemRandom from concurrent.futures import ThreadPoolExecutor from lib.proxy import", "stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats = stats.get_model('lambda') if s3Bucket is not None: if 's3' not", "a single function may be specified by name for a ' 'long lived", "{} dataChunks = [] for part in result: if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if len(part.body)", "We use a hack to send practically empty bodies decodedBody = b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody))", "# incur costs for SQS and S3, and be error prone. if len(functions)", "LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager logger = logging.getLogger(__name__) random = SystemRandom() class LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return a", "import boto3 import json import logging from base64 import b64decode from random import", "that would # incur costs for SQS and S3, and be error prone.", "S3StatsModel from lib.workers import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager logger = logging.getLogger(__name__) random = SystemRandom()", "if len(part.body) > 1: # We use a hack to send practically empty", "Supporting this across regions is not a priority since that would # incur", "def request(self, method, url, headers, data): task = LambdaSqsTask() if data: task.add_binary_attribute('data', data)", "task.add_binary_attribute('data', data) task.set_body(json.dumps({ 'method': method, 'url': url, 'headers': headers })) result = self.workerManager.execute(task,", "data) task.set_body(json.dumps({ 'method': method, 'url': url, 'headers': headers })) result = self.workerManager.execute(task, timeout=10)", "to send practically empty bodies decodedBody = b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content = b''.join(dataChunks) else:", "def 
max_workers(self): return maxLambdas @property def load_factor(self): return 4 def pre_invoke_callback(self, workerId, workerArgs):", "def pre_invoke_callback(self, workerId, workerArgs): logger.info('Starting worker: %d', workerId) workerArgs['longLived'] = True if s3Bucket:", "function may be specified by name for a ' 'long lived proxy') self.__verbose", "max_workers(self): return maxLambdas @property def load_factor(self): return 4 def pre_invoke_callback(self, workerId, workerArgs): logger.info('Starting", "import AbstractRequestProxy, ProxyResponse from lib.stats import LambdaStatsModel, S3StatsModel from lib.workers import LambdaSqsTaskConfig, LambdaSqsTask,", "def __load_object_from_s3(self, key): result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret = result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3, key) self.__s3Stats.record_get(len(ret))", "is None: return ProxyResponse(statusCode=500, headers={}, content='') if type(result) is list: # Fragmented response", "send practically empty bodies decodedBody = b64decode(part.body).decode('zlib') payload.update(json.loads(decodedBody)) content = b''.join(dataChunks) else: #", "self.workerManager.execute(task, timeout=10) if result is None: return ProxyResponse(statusCode=500, headers={}, content='') if type(result) is", "\"\"\"Return a function that queues requests in SQS\"\"\" def __init__(self, functions, maxLambdas, s3Bucket,", "def __delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def __load_object_from_s3(self, key): result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret", "b''.join(dataChunks) else: # Single message payload = json.loads(b64decode(result.body).decode('zlib')) if result.has_attribute('s3'): key = result.get_string_attribute('s3')", "not None: logger.info('Worker %d ran for %dms and proxied %d ' 'requests: Exit", "be specified by name for a ' 'long lived proxy') self.__verbose = verbose", 
"ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig): @property def queue_prefix(self): return 'lambda-proxy' @property def lambda_function(self): return random.choice(functions)", "@property def max_workers(self): return maxLambdas @property def load_factor(self): return 4 def pre_invoke_callback(self, workerId,", "workerArgs): logger.info('Starting worker: %d', workerId) workerArgs['longLived'] = True if s3Bucket: workerArgs['s3Bucket'] = s3Bucket", "random import SystemRandom from concurrent.futures import ThreadPoolExecutor from lib.proxy import AbstractRequestProxy, ProxyResponse from", "result.get_string_attribute('s3') content = self.__load_object_from_s3(key) elif result.has_attribute('data'): content = result.get_binary_attribute('data') else: content = b''", "random = SystemRandom() class LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return a function that queues requests in SQS\"\"\"", "type(result) is list: # Fragmented response payload = {} dataChunks = [] for", "regions is not a priority since that would # incur costs for SQS", "for SQS and S3, and be error prone. 
if len(functions) > 1 and", "dataChunks = [] for part in result: if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if len(part.body) >", "SQS\"\"\" def __init__(self, functions, maxLambdas, s3Bucket, stats, verbose): # Supporting this across regions", "logging from base64 import b64decode from random import SystemRandom from concurrent.futures import ThreadPoolExecutor", "maxLambdas @property def load_factor(self): return 4 def pre_invoke_callback(self, workerId, workerArgs): logger.info('Starting worker: %d',", "pre_invoke_callback(self, workerId, workerArgs): logger.info('Starting worker: %d', workerId) workerArgs['longLived'] = True if s3Bucket: workerArgs['s3Bucket']", "})) result = self.workerManager.execute(task, timeout=10) if result is None: return ProxyResponse(statusCode=500, headers={}, content='')", "lambda_function(self): return random.choice(functions) @property def max_workers(self): return maxLambdas @property def load_factor(self): return 4", "LambdaStatsModel()) self.__lambdaStats = stats.get_model('lambda') if s3Bucket is not None: if 's3' not in", "result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret = result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3, key) self.__s3Stats.record_get(len(ret)) return ret def", "load_factor(self): return 4 def pre_invoke_callback(self, workerId, workerArgs): logger.info('Starting worker: %d', workerId) workerArgs['longLived'] =", "result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3, key) self.__s3Stats.record_get(len(ret)) return ret def request(self, method, url, headers, data): task", "payload = {} dataChunks = [] for part in result: if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data'))", "dataChunks.append(part.get_binary_attribute('data')) if len(part.body) > 1: # We use a hack to send practically", "= result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3, 
key) self.__s3Stats.record_get(len(ret)) return ret def request(self, method, url, headers, data):", "a priority since that would # incur costs for SQS and S3, and", "single function may be specified by name for a ' 'long lived proxy')", "if len(functions) > 1 and 'arn:' in functions[0]: raise NotImplementedError( 'Only a single", "@property def queue_prefix(self): return 'lambda-proxy' @property def lambda_function(self): return random.choice(functions) @property def max_workers(self):", "logger.info('Worker %d ran for %dms and proxied %d ' 'requests: Exit reason: %s',", "# Supporting this across regions is not a priority since that would #", "for a ' 'long lived proxy') self.__verbose = verbose self.__s3Bucket = s3Bucket if", "and be error prone. if len(functions) > 1 and 'arn:' in functions[0]: raise", "logger = logging.getLogger(__name__) random = SystemRandom() class LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return a function that queues", "SQS and S3, and be error prone. if len(functions) > 1 and 'arn:'", "response payload = {} dataChunks = [] for part in result: if part.has_attribute('data'):", "workerId, workerArgs): logger.info('Starting worker: %d', workerId) workerArgs['longLived'] = True if s3Bucket: workerArgs['s3Bucket'] =", "lib.workers import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager logger = logging.getLogger(__name__) random = SystemRandom() class LongLivedLambdaProxy(AbstractRequestProxy):", "LambdaSqsTask, WorkerManager logger = logging.getLogger(__name__) random = SystemRandom() class LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return a function", "__load_object_from_s3(self, key): result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret = result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3, key) self.__s3Stats.record_get(len(ret)) return", "content='') if type(result) is list: # Fragmented response payload = {} dataChunks =", "key = result.get_string_attribute('s3') content = 
self.__load_object_from_s3(key) elif result.has_attribute('data'): content = result.get_binary_attribute('data') else: content", "boto3.client('s3') self.__s3DeletePool = ThreadPoolExecutor(1) class ProxyTask(LambdaSqsTaskConfig): @property def queue_prefix(self): return 'lambda-proxy' @property def", "' 'requests: Exit reason: %s', workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason']) self.workerManager = WorkerManager(ProxyTask(), stats)", "workerId) workerArgs['longLived'] = True if s3Bucket: workerArgs['s3Bucket'] = s3Bucket def post_return_callback(self, workerId, workerResponse):", "proxy') self.__verbose = verbose self.__s3Bucket = s3Bucket if 'lambda' not in stats.models: stats.register_model('lambda',", "%dms and proxied %d ' 'requests: Exit reason: %s', workerId, workerResponse['workerLifetime'], workerResponse['numRequestsProxied'], workerResponse['exitReason'])", "= result.get_binary_attribute('data') else: content = b'' statusCode = payload['statusCode'] responseHeaders = payload['headers'] return", "content = self.__load_object_from_s3(key) elif result.has_attribute('data'): content = result.get_binary_attribute('data') else: content = b'' statusCode", "ran for %dms and proxied %d ' 'requests: Exit reason: %s', workerId, workerResponse['workerLifetime'],", "ThreadPoolExecutor from lib.proxy import AbstractRequestProxy, ProxyResponse from lib.stats import LambdaStatsModel, S3StatsModel from lib.workers", "self.workerManager = WorkerManager(ProxyTask(), stats) def __delete_object_from_s3(self, key): self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def __load_object_from_s3(self, key): result", "None: if 's3' not in stats.models: stats.register_model('s3', S3StatsModel()) self.__s3Stats = stats.get_model('s3') self.__s3 =", "in functions[0]: raise NotImplementedError( 'Only a single function may be specified by name", "url, 'headers': headers })) result = self.workerManager.execute(task, 
timeout=10) if result is None: return", "headers, data): task = LambdaSqsTask() if data: task.add_binary_attribute('data', data) task.set_body(json.dumps({ 'method': method, 'url':", "import LambdaStatsModel, S3StatsModel from lib.workers import LambdaSqsTaskConfig, LambdaSqsTask, WorkerManager logger = logging.getLogger(__name__) random", "method, 'url': url, 'headers': headers })) result = self.workerManager.execute(task, timeout=10) if result is", "= SystemRandom() class LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return a function that queues requests in SQS\"\"\" def", "WorkerManager logger = logging.getLogger(__name__) random = SystemRandom() class LongLivedLambdaProxy(AbstractRequestProxy): \"\"\"Return a function that", "SystemRandom from concurrent.futures import ThreadPoolExecutor from lib.proxy import AbstractRequestProxy, ProxyResponse from lib.stats import", "if result.has_attribute('s3'): key = result.get_string_attribute('s3') content = self.__load_object_from_s3(key) elif result.has_attribute('data'): content = result.get_binary_attribute('data')", "self.__s3.delete_object(Bucket=self.__s3Bucket, Key=key) def __load_object_from_s3(self, key): result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret = result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3,", "<reponame>jhong93/aws-lambda-proxy import boto3 import json import logging from base64 import b64decode from random", "is list: # Fragmented response payload = {} dataChunks = [] for part", "self.__s3Bucket = s3Bucket if 'lambda' not in stats.models: stats.register_model('lambda', LambdaStatsModel()) self.__lambdaStats = stats.get_model('lambda')", "if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if len(part.body) > 1: # We use a hack to", "in result: if part.has_attribute('data'): dataChunks.append(part.get_binary_attribute('data')) if len(part.body) > 1: # We use a", "class LongLivedLambdaProxy(AbstractRequestProxy): 
\"\"\"Return a function that queues requests in SQS\"\"\" def __init__(self, functions,", "key): result = self.__s3.get_object(Bucket=self.__s3Bucket, Key=key) ret = result['Body'].read() self.__s3DeletePool.submit(self.__delete_object_from_s3, key) self.__s3Stats.record_get(len(ret)) return ret", "across regions is not a priority since that would # incur costs for", "return 4 def pre_invoke_callback(self, workerId, workerArgs): logger.info('Starting worker: %d', workerId) workerArgs['longLived'] = True", "workerArgs['s3Bucket'] = s3Bucket def post_return_callback(self, workerId, workerResponse): if workerResponse is not None: logger.info('Worker", "from base64 import b64decode from random import SystemRandom from concurrent.futures import ThreadPoolExecutor from", "lib.proxy import AbstractRequestProxy, ProxyResponse from lib.stats import LambdaStatsModel, S3StatsModel from lib.workers import LambdaSqsTaskConfig,", "may be specified by name for a ' 'long lived proxy') self.__verbose =", "Fragmented response payload = {} dataChunks = [] for part in result: if" ]
[ "by %s![/color]\" % user) else: self.sendChannelMessage(\"[color=red]A gather is already running![/color]\") \"\"\" Stop the", "that will be played Only available to game mods! \"\"\" def cmd_maps(self, userid,", "sent by users \"\"\" def execCommand(self, cmd): i1 = cmd.index(\"invokeruid\") i1 = cmd.index(\"=\",", "in team2] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\"", "this bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\" Toggle whether the bot is activated Only available to", "\"\"\" def getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" % name)) botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr =", "play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] : [i]Sets you as ready[/i]\\\\n\" \"[color=green]!unready[/color] : [i]Sets you as", "gatherRunning = False global vetoSystem vetoSystem = \"bo3\" # Move all players to", "self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot closed\") \"\"\" Get the client ID for this bot \"\"\"", "the amount of maps that will be played Only available to game mods!", "== userid: if p.isMod: self.sendChannelMessage(\"[color=red]You can't leave your own gather. 
Use !stop to", "strings \"\"\" def getenc(self, str): return str.encode('ascii') \"\"\" Broadcast message to all bots", "self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\" % (self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return channel \"\"\" Print out a", "cmdToThread = [ Queue(), Queue(), Queue() ] bots = [ BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"],", "1 and not x.__contains__(\"msg=ok\")] d = {} for it in cmd: d[it[0]] =", "\\ \"[color=green]!activate[/color] : [i]Toggle this bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\" Toggle whether the bot is", "x in team1] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\"))", "from telnet and close the client \"\"\" def closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot closed\")", "server message \"\"\" def sendServerMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3", "game mod![/color]\" % data) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Sets a user", "been deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're not an admin, GTFO![/color]\") \"\"\" Fix encoding of strings", "'<NAME>' __status__ = 'Production' import threading import telnetlib from Config import config, maps,", "help text to channel \"\"\" def cmd_help(self, userid, user, data): string = \"\\\\n[b]Available", "bot nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" % (self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return telnet \"\"\" Log out from", "vetoSystem = \"bo3\" # Create lists with all the bots and their Queues", "self.closeBot() \"\"\" Connects to the teamspeak server via telnet and returns the telnet", "vetoSystem = data broadcastMessage(\"[color=green]Game 
changed to %s![/color]\" % data) else: self.sendChannelMessage(\"[color=red]%s not supported![/color]\"", "% self.channel)) msg = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) # When the bot is", "not ex: # Print queue'd messages while not cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get()) # Read commands", "bot will make it easier to set up gathers, all from your teamspeak", "gather) \"\"\" def cmd_start(self, userid, user, data): global gatherRunning if not gatherRunning: gatherRunning", "= 'Copyright 2015, TS3GatherBot' __credits__ = ['<NAME>'] __licence__ = 'MIT' __version__ = '1.0.0'", "\"\"\" Sets a user as ready \"\"\" def cmd_ready(self, userid, user, data): global", "% (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) # Set bot nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" % (self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return", "BotThread(threading.Thread): def __init__(self, name, password, channel, index): super(BotThread, self).__init__() self.commands = { #", "self.start_gather() else: self.sendChannelMessage(\"[color=red]You're already ready![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Set up", "gather currently running![/color]\") \"\"\" Change the amount of maps that will be played", "all available commands[/i]\\\\n\" if userid in admins.keys(): string += \"\\\\n\\\\n\" \\ \"[b]Admin Commands:[/b]\\\\n\"", "gatherRunning = False vetoSystem = \"bo3\" # Create lists with all the bots", "Select virtual server id telnet.write(self.getenc(\"use sid=%s\\n\" % (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) # Set bot nickname", "currently running![/color]\") \"\"\" Set up teams, move players to correct channel and start", "telnet = telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host, telnet.port) 
telnet.write(self.getenc(\"login %s %s\\n\" % (self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\"))", "\"\"\" Toggle whether the bot is activated Only available to admins! \"\"\" def", ": [i]Starts a gather[/i]\\\\n\" \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] : [i]Set the", "len(x.split(\"=\")) > 1 and not x.__contains__(\"msg=ok\")] d = {} for it in cmd:", "to play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] : [i]Sets you as ready[/i]\\\\n\" \"[color=green]!unready[/color] : [i]Sets you", "players to lobby \"\"\" def cmd_stop(self, userid, user, data): global gatherRunning global players", "x if gatherRunning and p.isMod: gatherRunning = False global vetoSystem vetoSystem = \"bo3\"", "from queue import Queue from Player import Player # Amount of players needed", "= \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players = []", "% name)) botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr = botstr.split()[0] return int(botstr.split(\"=\")[1]) \"\"\" Get the", "the gather) \"\"\" def cmd_start(self, userid, user, data): global gatherRunning if not gatherRunning:", "q.put(msg) \"\"\" Init the app \"\"\" active = True players = [] gatherRunning", "[\"clid=\" + str(self.getPlayerId(x.name)) for x in players] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\"", "\"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \\ \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color]", "\"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] : [i]Set", "\"\"\" Print out a server message \"\"\" def sendServerMessage(self, msg): msg = msg.replace(\"", "!ur)[/color] : [i]Sets you as 
unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help (!h)[/color] : [i]List all available", "= index def run(self): self.telnet = self.initBot() self.botId = self.getPlayerId(self.name) self.channel = self.moveToChannel(self.getChannelId(self.channel))", "d[it[0]] = it[1] global active cmdsp = d['msg'].split(\"\\\\\\\\s\") if (cmdsp[0] in self.commands and", "] bots = [ BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"], 0), BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"], 1), BotThread(config[\"user2\"],", "d['msg'] == '!activate': self.commands[cmdsp[0]](userid, d['invokername'], d['msg']) \"\"\" Start gather and set mod (the", "\"\"\" Fix encoding of strings \"\"\" def getenc(self, str): return str.encode('ascii') \"\"\" Broadcast", "are ready! Setting up teams![/color]\" % PLAYERS_NEEDED) l = players[:] import random random.shuffle(l)", "Parse and execute commands sent by users \"\"\" def execCommand(self, cmd): i1 =", "user, data): string = \"\\\\n[b]Available commands are:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\", "%s cid=%s\\n\" % (plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x", "players players.append(Player(user, userid, True)) broadcastMessage(\"[color=green]A gather has been started by %s![/color]\" % user)", "has been stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Change the amount of", "(!notready, !nr, !ur)[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help (!h)[/color] : [i]List", "for x in team2] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g2']))))", "for q in cmdToThread: q.put(msg) \"\"\" Init the app \"\"\" active = True", "pattern=%s\\n\" % (channelLobby))) channelLobby = 
str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby = channelLobby.split(\"\\\\n\")[1] channelLobby = channelLobby.split()[0] return", "'Production' import threading import telnetlib from Config import config, maps, admins, vetoprocesses from", "the amount of maps to play (default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready (!r, !gaben)[/color] : [i]Sets", "broadcastMessage(\"[color=green]A gather has been started by %s![/color]\" % user) else: self.sendChannelMessage(\"[color=red]A gather is", "self.cmd_help, # Admin commands \"!activate\": self.cmd_activate } self.name = name self.password = password", "commands[/color]\" ) # While an exit command has not been issued ex =", "number please :)) PLAYERS_NEEDED = config[\"players\"] \"\"\" Bot Thread \"\"\" class BotThread(threading.Thread): def", "config[\"pass\"], config[\"gl\"], 0), BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"], 1), BotThread(config[\"user2\"], config[\"pass2\"], config[\"g2\"], 2) ] for", "active = True players = [] gatherRunning = False vetoSystem = \"bo3\" #", "stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Change the amount of maps that", "gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] : [i]Set the amount of maps to play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] :", "random random.shuffle(l) team1 = l[:int(PLAYERS_NEEDED/2)] team2 = l[int(PLAYERS_NEEDED/2):] plrs = [\"clid=\" + str(self.getPlayerId(x.name))", "gather currently running![/color]\") \"\"\" Print help text to channel \"\"\" def cmd_help(self, userid,", "Only available to admins! \"\"\" def cmd_activate(self, userid, user, data): if userid in", "amount of maps that will be played Only available to game mods! 
\"\"\"", "already ready![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Set up teams, move players", "bots = [ BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"], 0), BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"], 1), BotThread(config[\"user2\"], config[\"pass2\"],", "= [\"clid=\" + str(self.getPlayerId(x.name)) for x in players] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s", "Print queue'd messages while not cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get()) # Read commands from user and", "[i]Sets you as ready[/i]\\\\n\" \\ \"[color=green]!unready (!notready, !nr, !ur)[/color] : [i]Sets you as", ": [i]Set the amount of maps to play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] : [i]Sets you", "telnet.write(self.getenc(\"login %s %s\\n\" % (self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\")) # Select virtual server id telnet.write(self.getenc(\"use", "type !help for a full list of commands[/color]\" ) # While an exit", "str(self.getPlayerId(x.name)) for x in players] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs,", "index): super(BotThread, self).__init__() self.commands = { # User commands \"!start\": self.cmd_start, \"!stop\": self.cmd_stop,", "connections # before exiting thread self.closeBot() \"\"\" Connects to the teamspeak server via", "userid, True)) broadcastMessage(\"[color=green]A gather has been started by %s![/color]\" % user) else: self.sendChannelMessage(\"[color=red]A", "global gatherRunning global players p = None for x in players: if x.uid", "to play (default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready (!r, !gaben)[/color] : [i]Sets you as ready[/i]\\\\n\" \\", "is currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\"", 
"str(self.getPlayerId(x.name)) for x in team1] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs,", "(plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in team2] plrs", "[i]Sets you as ready[/i]\\\\n\" \"[color=green]!unready[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \"[color=red]Please type !help", "channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return channel \"\"\" Print out a server message \"\"\" def sendServerMessage(self,", "pattern=%s\\n\" % name)) botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr = botstr.split()[0] return int(botstr.split(\"=\")[1]) \"\"\" Get", "= cmd[i1 + 1:i2] cmd = [x.split(\"=\") for x in cmd.split() if len(x.split(\"=\"))", "d['msg']) \"\"\" Start gather and set mod (the one who started the gather)", "1 and p.isMod: data = data[1].lower() if data in vetoprocesses: global vetoSystem vetoSystem", "user) self.start_gather() else: self.sendChannelMessage(\"[color=red]You're already ready![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Set", "userid, user, data): global gatherRunning if gatherRunning: global players for p in players:", "user, data): global gatherRunning if gatherRunning: global players alreadyReady = False for p", "\"\\\\n[b]Available commands are:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color] : [i]Starts a", "[i]Sets you as unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help (!h)[/color] : [i]List all available commands[/i]\\\\n\" if", ": [i]Toggle this bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\" Toggle whether the bot is activated Only", "not alreadyReady: players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s is ready![/color]\" % user) self.start_gather() else: 
self.sendChannelMessage(\"[color=red]You're already", "p = x if gatherRunning and p.isMod: gatherRunning = False global vetoSystem vetoSystem", "+ str(self.getPlayerId(x.name)) for x in team2] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" %", "broadcastMessage(msg): for q in cmdToThread: q.put(msg) \"\"\" Init the app \"\"\" active =", "== userid: p = x if len(data) > 1 and p.isMod: data =", "users \"\"\" def execCommand(self, cmd): i1 = cmd.index(\"invokeruid\") i1 = cmd.index(\"=\", i1) i2", "data): global gatherRunning if gatherRunning: global players alreadyReady = False for p in", "\\ \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \\ \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \\", "= False while not ex: # Print queue'd messages while not cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get())", "self.cmd_activate } self.name = name self.password = password self.telnet = None self.botId =", "\"\"\" def cmd_maps(self, userid, user, data): global gatherRunning if gatherRunning: data = data.split(\"\\\\\\\\s\")", "self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets a player as not ready \"\"\" def cmd_unready(self, userid,", "in admins.keys(): string += \"\\\\n\\\\n\" \\ \"[b]Admin Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\"", "telnet.port) telnet.write(self.getenc(\"login %s %s\\n\" % (self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\")) # Select virtual server id", "i2 = cmd.index(\"\\\\n\", i1) userid = cmd[i1 + 1:i2] cmd = [x.split(\"=\") for", "channel and start veto process \"\"\" def start_gather(self): global players if len(players) ==", "if len(players) == PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players are ready! 
Setting up teams![/color]\" % PLAYERS_NEEDED)", "active cmdsp = d['msg'].split(\"\\\\\\\\s\") if (cmdsp[0] in self.commands and active) or d['msg'] ==", "% (plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets a player as not ready \"\"\" def", "\"\"\" active = True players = [] gatherRunning = False vetoSystem = \"bo3\"", "channelLobby.split()[0] return int(channelLobby.split(\"=\")[1]) \"\"\" Move user to channel \"\"\" def moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove", "not gatherRunning: gatherRunning = True global players players.append(Player(user, userid, True)) broadcastMessage(\"[color=green]A gather has", "not an admin, GTFO![/color]\") \"\"\" Fix encoding of strings \"\"\" def getenc(self, str):", "Welcome message self.sendChannelMessage( \"\\\\n[b]The GatherBot is currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\"", "instead![/color]\") else: players.remove(p) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Print help text to", "+ 1:i2] cmd = [x.split(\"=\") for x in cmd.split() if len(x.split(\"=\")) > 1", "from your teamspeak 3 server. 
The bot requires access to the Teamspeak 3", "global players players.append(Player(user, userid, True)) broadcastMessage(\"[color=green]A gather has been started by %s![/color]\" %", "running![/color]\") \"\"\" Change the amount of maps that will be played Only available", "if len(data) > 1 and p.isMod: data = data[1].lower() if data in vetoprocesses:", "team1] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs =", "Created by <NAME> This bot will make it easier to set up gathers,", "x.uid == userid: p = x if len(data) > 1 and p.isMod: data", "to channel \"\"\" def cmd_help(self, userid, user, data): string = \"\\\\n[b]Available commands are:[/b]\\\\n\"", "ready![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Set up teams, move players to", "Player import Player # Amount of players needed to start gather (even number", "\"!notready\": self.cmd_unready, \"!nr\": self.cmd_unready, \"!ur\": self.cmd_unready, \"!help\": self.cmd_help, \"!h\": self.cmd_help, # Admin commands", "%s\\n\" % (self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\")) # Select virtual server id telnet.write(self.getenc(\"use sid=%s\\n\" %", "mods! 
\"\"\" def cmd_maps(self, userid, user, data): global gatherRunning if gatherRunning: data =", "self.cmd_ready, \"!unready\": self.cmd_unready, \"!notready\": self.cmd_unready, \"!nr\": self.cmd_unready, \"!ur\": self.cmd_unready, \"!help\": self.cmd_help, \"!h\": self.cmd_help,", "unready[/i]\\\\n\\\\n\" \"[color=red]Please type !help for a full list of commands[/color]\" ) # While", "= players[:] import random random.shuffle(l) team1 = l[:int(PLAYERS_NEEDED/2)] team2 = l[int(PLAYERS_NEEDED/2):] plrs =", "\\ \"[color=green]!help (!h)[/color] : [i]List all available commands[/i]\\\\n\" if userid in admins.keys(): string", "alreadyReady = False for p in players: if p.uid == userid: alreadyReady =", "team1 = l[:int(PLAYERS_NEEDED/2)] team2 = l[int(PLAYERS_NEEDED/2):] plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x", "= None self.channel = channel self.ti = index def run(self): self.telnet = self.initBot()", "x in team2] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\"))", "string = \"\\\\n[b]Available commands are:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color] :", "started by %s![/color]\" % user) else: self.sendChannelMessage(\"[color=red]A gather is already running![/color]\") \"\"\" Stop", "self.cmd_ready, \"!r\": self.cmd_ready, \"!gaben\": self.cmd_ready, \"!unready\": self.cmd_unready, \"!notready\": self.cmd_unready, \"!nr\": self.cmd_unready, \"!ur\": self.cmd_unready,", "%s cid=%s\\n\" % (plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players = [] broadcastMessage(\"[color=red]Gather has been", "admins: global active active = not active if active: broadcastMessage(\"[color=green]GatherBot has been activated[/color]\")", "data): string = \"\\\\n[b]Available commands are:[/b]\\\\n\" \\ \"[color=grey]!<cmd> 
(<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color]", ": [i]Set the amount of maps to play (default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready (!r, !gaben)[/color]", "(plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets a player as not ready \"\"\" def cmd_unready(self,", "self.cmd_maps, \"!ready\": self.cmd_ready, \"!r\": self.cmd_ready, \"!gaben\": self.cmd_ready, \"!unready\": self.cmd_unready, \"!notready\": self.cmd_unready, \"!nr\": self.cmd_unready,", "= [ BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"], 0), BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"], 1), BotThread(config[\"user2\"], config[\"pass2\"], config[\"g2\"],", "make it easier to set up gathers, all from your teamspeak 3 server.", "\"\"\" Parse and execute commands sent by users \"\"\" def execCommand(self, cmd): i1", "data broadcastMessage(\"[color=green]Game changed to %s![/color]\" % data) else: self.sendChannelMessage(\"[color=red]%s not supported![/color]\" % data)", "PLAYERS_NEEDED = config[\"players\"] \"\"\" Bot Thread \"\"\" class BotThread(threading.Thread): def __init__(self, name, password,", "of commands[/color]\" ) # While an exit command has not been issued ex", "bot requires access to the Teamspeak 3 server query! 
\"\"\" __author__ = '<NAME>'", "self.ti = index def run(self): self.telnet = self.initBot() self.botId = self.getPlayerId(self.name) self.channel =", "cmd_help(self, userid, user, data): string = \"\\\\n[b]Available commands are:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) :", "players p = None for x in players: if x.uid == userid: p", "will make it easier to set up gathers, all from your teamspeak 3", "for p in players: if p.uid == userid: if p.isMod: self.sendChannelMessage(\"[color=red]You can't leave", "\"[color=red]Please type !help for a full list of commands[/color]\" ) # While an", "telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host, telnet.port) telnet.write(self.getenc(\"login %s %s\\n\" % (self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\")) # Select", "full list of commands[/color]\" ) # While an exit command has not been", "\"\"\" Connects to the teamspeak server via telnet and returns the telnet client", "msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse", "bot \"\"\" def getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" % name)) botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr", "= True global players players.append(Player(user, userid, True)) broadcastMessage(\"[color=green]A gather has been started by", "close the client \"\"\" def closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot closed\") \"\"\" Get the", "cid=%s\\n\" % (plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in", "gatherRunning global players p = None for x in players: if x.uid ==", "the teamspeak server via telnet and returns the telnet client \"\"\" def initBot(self):", "global active cmdsp = d['msg'].split(\"\\\\\\\\s\") 
if (cmdsp[0] in self.commands and active) or d['msg']", "for x in players: if x.uid == userid: p = x if len(data)", "available to game mods! \"\"\" def cmd_maps(self, userid, user, data): global gatherRunning if", "\"\"\" Bot Thread \"\"\" class BotThread(threading.Thread): def __init__(self, name, password, channel, index): super(BotThread,", "as unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help (!h)[/color] : [i]List all available commands[/i]\\\\n\" if userid in", "PLAYERS_NEEDED) l = players[:] import random random.shuffle(l) team1 = l[:int(PLAYERS_NEEDED/2)] team2 = l[int(PLAYERS_NEEDED/2):]", "bots \"\"\" def broadcastMessage(msg): for q in cmdToThread: q.put(msg) \"\"\" Init the app", "gather currently running![/color]\") \"\"\" Set up teams, move players to correct channel and", "global gatherRunning if gatherRunning: data = data.split(\"\\\\\\\\s\") global players p = None for", "player as not ready \"\"\" def cmd_unready(self, userid, user, data): global gatherRunning if", "msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\"))", "getChannelId(self, channel): channelLobby = channel.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" % (channelLobby))) channelLobby =", "if p.uid == userid: alreadyReady = True if not alreadyReady: players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s", "has been deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're not an admin, GTFO![/color]\") \"\"\" Fix encoding of", "[] gatherRunning = False vetoSystem = \"bo3\" # Create lists with all the", "# Read commands from user and execute them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\" % self.channel))", "cid=%s\\n\" % (self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return channel \"\"\" Print out a server message", 
"deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're not an admin, GTFO![/color]\") \"\"\" Fix encoding of strings \"\"\"", "alreadyReady = True if not alreadyReady: players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s is ready![/color]\" % user)", "currently running![/color]\") \"\"\" Change the amount of maps that will be played Only", "\"\"\" def sendChannelMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\" %", "are:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \\", "global vetoSystem vetoSystem = data broadcastMessage(\"[color=green]Game changed to %s![/color]\" % data) else: self.sendChannelMessage(\"[color=red]%s", "all bots \"\"\" def broadcastMessage(msg): for q in cmdToThread: q.put(msg) \"\"\" Init the", "Queues cmdToThread = [ Queue(), Queue(), Queue() ] bots = [ BotThread(config[\"user\"], config[\"pass\"],", "as ready[/i]\\\\n\" \"[color=green]!unready[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \"[color=red]Please type !help for a", "x in cmd.split() if len(x.split(\"=\")) > 1 and not x.__contains__(\"msg=ok\")] d = {}", "has been started by %s![/color]\" % user) else: self.sendChannelMessage(\"[color=red]A gather is already running![/color]\")", "True global players players.append(Player(user, userid, True)) broadcastMessage(\"[color=green]A gather has been started by %s![/color]\"", "!gaben)[/color] : [i]Sets you as ready[/i]\\\\n\" \\ \"[color=green]!unready (!notready, !nr, !ur)[/color] : [i]Sets", "msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\"", "from Player import Player # Amount of players needed to start gather (even", "False for p in players: if p.uid == userid: alreadyReady = True if", 
"# before exiting thread self.closeBot() \"\"\" Connects to the teamspeak server via telnet", "ex = False while not ex: # Print queue'd messages while not cmdToThread[self.ti].empty():", "\"\"\" Set up teams, move players to correct channel and start veto process", "p.isMod: gatherRunning = False global vetoSystem vetoSystem = \"bo3\" # Move all players", "= [\"clid=\" + str(self.getPlayerId(x.name)) for x in team2] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s", "GTFO![/color]\") \"\"\" Fix encoding of strings \"\"\" def getenc(self, str): return str.encode('ascii') \"\"\"", "i1) userid = cmd[i1 + 1:i2] cmd = [x.split(\"=\") for x in cmd.split()", "been stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Change the amount of maps", "admins.keys(): string += \"\\\\n\\\\n\" \\ \"[b]Admin Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\", "to the Teamspeak 3 server query! 
\"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright", "and close the client \"\"\" def closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot closed\") \"\"\" Get", "cmdToThread: q.put(msg) \"\"\" Init the app \"\"\" active = True players = []", "config[\"pass1\"], config[\"g1\"], 1), BotThread(config[\"user2\"], config[\"pass2\"], config[\"g2\"], 2) ] for b in bots: b.start()", "cmd_maps(self, userid, user, data): global gatherRunning if gatherRunning: data = data.split(\"\\\\\\\\s\") global players", "self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets a player as not ready \"\"\" def cmd_unready(self, userid, user,", "\", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print a message", "d['invokername'], d['msg']) \"\"\" Start gather and set mod (the one who started the", "import random random.shuffle(l) team1 = l[:int(PLAYERS_NEEDED/2)] team2 = l[int(PLAYERS_NEEDED/2):] plrs = [\"clid=\" +", "int(botstr.split(\"=\")[1]) \"\"\" Get the channel ID from the name of the channel \"\"\"", "(msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse and execute commands sent by users \"\"\" def execCommand(self,", "cmd.index(\"invokeruid\") i1 = cmd.index(\"=\", i1) i2 = cmd.index(\"\\\\n\", i1) userid = cmd[i1 +", "\"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \\ \"[color=green]!stop[/color] :", "if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) # When the bot is closed, close all connections #", "the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] : [i]Set the amount of maps to play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color]", "plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in team2] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove", 
"players.remove(p) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Print help text to channel \"\"\"", "players = [] broadcastMessage(\"[color=red]Gather has been stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\"", "'Copyright 2015, TS3GatherBot' __credits__ = ['<NAME>'] __licence__ = 'MIT' __version__ = '1.0.0' __maintainer__", "self.cmd_unready, \"!help\": self.cmd_help, \"!h\": self.cmd_help, # Admin commands \"!activate\": self.cmd_activate } self.name =", "% (self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\")) # Select virtual server id telnet.write(self.getenc(\"use sid=%s\\n\" % (config[\"sid\"])))", "p.uid == userid: if p.isMod: self.sendChannelMessage(\"[color=red]You can't leave your own gather. Use !stop", "team2 = l[int(PLAYERS_NEEDED/2):] plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in team1] plrs", "veto process \"\"\" def start_gather(self): global players if len(players) == PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players", "When the bot is closed, close all connections # before exiting thread self.closeBot()", "mod (the one who started the gather) \"\"\" def cmd_start(self, userid, user, data):", "telnet.read_until(self.getenc(\"msg=ok\")) # Select virtual server id telnet.write(self.getenc(\"use sid=%s\\n\" % (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) # Set", "[i]Set the amount of maps to play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] : [i]Sets you as", "teams![/color]\" % PLAYERS_NEEDED) l = players[:] import random random.shuffle(l) team1 = l[:int(PLAYERS_NEEDED/2)] team2", "if userid in admins.keys(): string += \"\\\\n\\\\n\" \\ \"[b]Admin Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>)", "global gatherRunning if gatherRunning: global players alreadyReady = False for p in players:", "players: if p.uid == userid: if p.isMod: self.sendChannelMessage(\"[color=red]You 
can't leave your own gather.", "utf-8 -*- \"\"\" Created by <NAME> This bot will make it easier to", "self.channel = channel self.ti = index def run(self): self.telnet = self.initBot() self.botId =", "for it in cmd: d[it[0]] = it[1] global active cmdsp = d['msg'].split(\"\\\\\\\\s\") if", "msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print a", "not been issued ex = False while not ex: # Print queue'd messages", "name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" % name)) botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr = botstr.split()[0] return int(botstr.split(\"=\")[1])", "the gather and move all players to lobby \"\"\" def cmd_stop(self, userid, user,", "run(self): self.telnet = self.initBot() self.botId = self.getPlayerId(self.name) self.channel = self.moveToChannel(self.getChannelId(self.channel)) # Print Welcome", ": [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!activate[/color] : [i]Toggle this bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\" Toggle whether the", "who started the gather) \"\"\" def cmd_start(self, userid, user, data): global gatherRunning if", "be played Only available to game mods! 
\"\"\" def cmd_maps(self, userid, user, data):", "\"\"\" def cmd_ready(self, userid, user, data): global gatherRunning if gatherRunning: global players alreadyReady", "a server message \"\"\" def sendServerMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage", "\"bo3\" # Move all players to Lobby plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for", "up teams, move players to correct channel and start veto process \"\"\" def", "cmd_stop(self, userid, user, data): global gatherRunning global players p = None for x", "all players to Lobby plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in players]", "to %s![/color]\" % data) else: self.sendChannelMessage(\"[color=red]%s not supported![/color]\" % data) else: self.sendChannelMessage(\"[color=red]You didn't", "\"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color] : [i]Set the amount of maps", "id telnet.write(self.getenc(\"use sid=%s\\n\" % (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) # Set bot nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" %", "cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get()) # Read commands from user and execute them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\"", "d = {} for it in cmd: d[it[0]] = it[1] global active cmdsp", "active) or d['msg'] == '!activate': self.commands[cmdsp[0]](userid, d['invokername'], d['msg']) \"\"\" Start gather and set", "vetoSystem vetoSystem = \"bo3\" # Move all players to Lobby plrs = [\"clid=\"", "userid: p = x if gatherRunning and p.isMod: gatherRunning = False global vetoSystem", "cmdsp = d['msg'].split(\"\\\\\\\\s\") if (cmdsp[0] in self.commands and active) or d['msg'] == '!activate':", "players[:] import random random.shuffle(l) team1 = l[:int(PLAYERS_NEEDED/2)] team2 = l[int(PLAYERS_NEEDED/2):] plrs = [\"clid=\"", "\\ \"[color=grey]!<cmd> 
(<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \\ \"[color=green]!stop[/color]", "gather is already running![/color]\") \"\"\" Stop the gather and move all players to", "def cmd_stop(self, userid, user, data): global gatherRunning global players p = None for", "%s![/color]\" % user) else: self.sendChannelMessage(\"[color=red]A gather is already running![/color]\") \"\"\" Stop the gather", "False vetoSystem = \"bo3\" # Create lists with all the bots and their", "!stop to cancel it instead![/color]\") else: players.remove(p) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\"", "import threading import telnetlib from Config import config, maps, admins, vetoprocesses from queue", "list of commands[/color]\" ) # While an exit command has not been issued", "def __init__(self, name, password, channel, index): super(BotThread, self).__init__() self.commands = { # User", "def cmd_maps(self, userid, user, data): global gatherRunning if gatherRunning: data = data.split(\"\\\\\\\\s\") global", "self.cmd_help, \"!h\": self.cmd_help, # Admin commands \"!activate\": self.cmd_activate } self.name = name self.password", "# User commands \"!start\": self.cmd_start, \"!stop\": self.cmd_stop, \"!maps\": self.cmd_maps, \"!ready\": self.cmd_ready, \"!r\": self.cmd_ready,", "\"[color=green]!help (!h)[/color] : [i]List all available commands[/i]\\\\n\" if userid in admins.keys(): string +=", "\"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players = [] broadcastMessage(\"[color=red]Gather", "ID for this bot \"\"\" def getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" % name)) botstr", "of maps to play (default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready (!r, !gaben)[/color] : [i]Sets you as", "= True if not alreadyReady: 
players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s is ready![/color]\" % user) self.start_gather()", "telnet.read_until(self.getenc(\"msg=ok\")) return telnet \"\"\" Log out from telnet and close the client \"\"\"", "a gather[/i]\\\\n\" \\ \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color] : [i]Set the", "please :)) PLAYERS_NEEDED = config[\"players\"] \"\"\" Bot Thread \"\"\" class BotThread(threading.Thread): def __init__(self,", "channel self.ti = index def run(self): self.telnet = self.initBot() self.botId = self.getPlayerId(self.name) self.channel", "gather. Use !stop to cancel it instead![/color]\") else: players.remove(p) else: self.sendChannelMessage(\"[color=red]No gather currently", "userid, user, data): if userid in admins: global active active = not active", "# While an exit command has not been issued ex = False while", "= '1.0.0' __maintainer__ = '<NAME>' __status__ = 'Production' import threading import telnetlib from", "closed\") \"\"\" Get the client ID for this bot \"\"\" def getPlayerId(self, name):", "messages while not cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get()) # Read commands from user and execute them", "userid: p = x if len(data) > 1 and p.isMod: data = data[1].lower()", "channelLobby = channel.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" % (channelLobby))) channelLobby = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby", "channelLobby = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby = channelLobby.split(\"\\\\n\")[1] channelLobby = channelLobby.split()[0] return int(channelLobby.split(\"=\")[1]) \"\"\" Move", "gatherRunning: global players for p in players: if p.uid == userid: if p.isMod:", "the gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color] : [i]Set the amount of maps to play (default=bo3)[/i]\\\\n\"", "\"[color=green]!maps[/color] : [i]Set the amount 
of maps to play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] : [i]Sets", "self.botId = None self.channel = channel self.ti = index def run(self): self.telnet =", "telnet.write(self.getenc(\"use sid=%s\\n\" % (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) # Set bot nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" % (self.name)))", "closed, close all connections # before exiting thread self.closeBot() \"\"\" Connects to the", "cmd): i1 = cmd.index(\"invokeruid\") i1 = cmd.index(\"=\", i1) i2 = cmd.index(\"\\\\n\", i1) userid", "\"\"\" def cmd_stop(self, userid, user, data): global gatherRunning global players p = None", "active: broadcastMessage(\"[color=green]GatherBot has been activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot has been deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're not", "else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Set up teams, move players to correct", "= {} for it in cmd: d[it[0]] = it[1] global active cmdsp =", "command has not been issued ex = False while not ex: # Print", "\"\"\" def sendServerMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1 msg=%s\\n\"", "userid, user, data): global gatherRunning if gatherRunning: global players alreadyReady = False for", "global players if len(players) == PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players are ready! 
Setting up teams![/color]\"", "nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" % (self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return telnet \"\"\" Log out from telnet", "import Queue from Player import Player # Amount of players needed to start", "by users \"\"\" def execCommand(self, cmd): i1 = cmd.index(\"invokeruid\") i1 = cmd.index(\"=\", i1)", "to correct channel and start veto process \"\"\" def start_gather(self): global players if", "# Admin commands \"!activate\": self.cmd_activate } self.name = name self.password = password self.telnet", "the client \"\"\" def closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot closed\") \"\"\" Get the client", "\", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse and execute commands", "= \"\\\\n[b]Available commands are:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color] : [i]Starts", "ready[/i]\\\\n\" \\ \"[color=green]!unready (!notready, !nr, !ur)[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help", "\"\"\" Print a message to a specific channel \"\"\" def sendChannelMessage(self, msg): msg", "data): if userid in admins: global active active = not active if active:", "'MIT' __version__ = '1.0.0' __maintainer__ = '<NAME>' __status__ = 'Production' import threading import", "gatherRunning if gatherRunning: data = data.split(\"\\\\\\\\s\") global players p = None for x", "= \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets a player", ": [i]Starts a gather[/i]\\\\n\" \\ \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color] :", "= None self.botId = None self.channel = channel 
self.ti = index def run(self):", "telnet.read_until(self.getenc(\"msg=ok\")) # Set bot nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" % (self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return telnet \"\"\"", "'<NAME>' __copyright__ = 'Copyright 2015, TS3GatherBot' __credits__ = ['<NAME>'] __licence__ = 'MIT' __version__", "self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" % name)) botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr = botstr.split()[0] return int(botstr.split(\"=\")[1]) \"\"\"", "msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print a message to a specific channel \"\"\"", "(<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!activate[/color] : [i]Toggle this bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\" Toggle whether", "self.cmd_unready, \"!notready\": self.cmd_unready, \"!nr\": self.cmd_unready, \"!ur\": self.cmd_unready, \"!help\": self.cmd_help, \"!h\": self.cmd_help, # Admin", "data): global gatherRunning if gatherRunning: data = data.split(\"\\\\\\\\s\") global players p = None", "if x.uid == userid: p = x if gatherRunning and p.isMod: gatherRunning =", "= name self.password = password self.telnet = None self.botId = None self.channel =", "{} for it in cmd: d[it[0]] = it[1] global active cmdsp = d['msg'].split(\"\\\\\\\\s\")", "\"\"\" Move user to channel \"\"\" def moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\" %", "= False vetoSystem = \"bo3\" # Create lists with all the bots and", "\"\"\" def getenc(self, str): return str.encode('ascii') \"\"\" Broadcast message to all bots \"\"\"", "= channelLobby.split()[0] return int(channelLobby.split(\"=\")[1]) \"\"\" Move user to channel \"\"\" def moveToChannel(self, channel):", "[i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!activate[/color] : [i]Toggle this bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\" Toggle 
whether the bot", "self.cmd_ready, \"!gaben\": self.cmd_ready, \"!unready\": self.cmd_unready, \"!notready\": self.cmd_unready, \"!nr\": self.cmd_unready, \"!ur\": self.cmd_unready, \"!help\": self.cmd_help,", "Log out from telnet and close the client \"\"\" def closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close()", "execute commands sent by users \"\"\" def execCommand(self, cmd): i1 = cmd.index(\"invokeruid\") i1", "self.commands[cmdsp[0]](userid, d['invokername'], d['msg']) \"\"\" Start gather and set mod (the one who started", "# Print queue'd messages while not cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get()) # Read commands from user", "cancel it instead![/color]\") else: players.remove(p) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Print help", "bots and their Queues cmdToThread = [ Queue(), Queue(), Queue() ] bots =", "in vetoprocesses: global vetoSystem vetoSystem = data broadcastMessage(\"[color=green]Game changed to %s![/color]\" % data)", "botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr = botstr.split()[0] return int(botstr.split(\"=\")[1]) \"\"\" Get the channel ID", "-*- \"\"\" Created by <NAME> This bot will make it easier to set", "__credits__ = ['<NAME>'] __licence__ = 'MIT' __version__ = '1.0.0' __maintainer__ = '<NAME>' __status__", "set mod (the one who started the gather) \"\"\" def cmd_start(self, userid, user,", "\\ \"[b]Admin Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!activate[/color] : [i]Toggle this", "\"\"\" def moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\" % (self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return channel", "self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\" % self.channel)) msg = 
str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) # When", "active active = not active if active: broadcastMessage(\"[color=green]GatherBot has been activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot", "== PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players are ready! Setting up teams![/color]\" % PLAYERS_NEEDED) l =", "self.password))) telnet.read_until(self.getenc(\"msg=ok\")) # Select virtual server id telnet.write(self.getenc(\"use sid=%s\\n\" % (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) #", "active = not active if active: broadcastMessage(\"[color=green]GatherBot has been activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot has", "their Queues cmdToThread = [ Queue(), Queue(), Queue() ] bots = [ BotThread(config[\"user\"],", "# Set bot nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" % (self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return telnet \"\"\" Log", "channel \"\"\" def getChannelId(self, channel): channelLobby = channel.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" %", "if (cmdsp[0] in self.commands and active) or d['msg'] == '!activate': self.commands[cmdsp[0]](userid, d['invokername'], d['msg'])", "user, data): global gatherRunning global players p = None for x in players:", "data.split(\"\\\\\\\\s\") global players p = None for x in players: if x.uid ==", "name self.password = password self.telnet = None self.botId = None self.channel = channel", "= None for x in players: if x.uid == userid: p = x", "p.isMod: data = data[1].lower() if data in vetoprocesses: global vetoSystem vetoSystem = data", "True if not alreadyReady: players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s is ready![/color]\" % user) self.start_gather() else:", "% user) self.start_gather() else: self.sendChannelMessage(\"[color=red]You're already ready![/color]\") 
else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\"", "you as unready[/i]\\\\n\\\\n\" \"[color=red]Please type !help for a full list of commands[/color]\" )", "channelLobby = channelLobby.split(\"\\\\n\")[1] channelLobby = channelLobby.split()[0] return int(channelLobby.split(\"=\")[1]) \"\"\" Move user to channel", "players for p in players: if p.uid == userid: if p.isMod: self.sendChannelMessage(\"[color=red]You can't", "\\ \"[color=green]!maps[/color] : [i]Set the amount of maps to play (default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready", "self.sendChannelMessage(string) \"\"\" Toggle whether the bot is activated Only available to admins! \"\"\"", "% (plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in team2]", "user to channel \"\"\" def moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\" % (self.botId, channel)))", "Connects to the teamspeak server via telnet and returns the telnet client \"\"\"", "own gather. 
Use !stop to cancel it instead![/color]\") else: players.remove(p) else: self.sendChannelMessage(\"[color=red]No gather", "% PLAYERS_NEEDED) l = players[:] import random random.shuffle(l) team1 = l[:int(PLAYERS_NEEDED/2)] team2 =", "global gatherRunning if gatherRunning: global players for p in players: if p.uid ==", "client_nickname=%s\\n\" % (self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return telnet \"\"\" Log out from telnet and close", "cid=%s\\n\" % (plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets a player as not ready \"\"\"", "\"\"\" def cmd_unready(self, userid, user, data): global gatherRunning if gatherRunning: global players for", "self.channel = self.moveToChannel(self.getChannelId(self.channel)) # Print Welcome message self.sendChannelMessage( \"\\\\n[b]The GatherBot is currently running[/b]\\\\n\\\\n\"", "gatherRunning and p.isMod: gatherRunning = False global vetoSystem vetoSystem = \"bo3\" # Move", "Move all players to Lobby plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in", "value or you're not the game mod![/color]\" % data) else: self.sendChannelMessage(\"[color=red]No gather currently", "if gatherRunning: global players for p in players: if p.uid == userid: if", "in admins: global active active = not active if active: broadcastMessage(\"[color=green]GatherBot has been", "to set up gathers, all from your teamspeak 3 server. 
The bot requires", "channel ID from the name of the channel \"\"\" def getChannelId(self, channel): channelLobby", "self.commands and active) or d['msg'] == '!activate': self.commands[cmdsp[0]](userid, d['invokername'], d['msg']) \"\"\" Start gather", "\"\"\" def cmd_activate(self, userid, user, data): if userid in admins: global active active", "it in cmd: d[it[0]] = it[1] global active cmdsp = d['msg'].split(\"\\\\\\\\s\") if (cmdsp[0]", "in team1] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs", "\"\"\" Stop the gather and move all players to lobby \"\"\" def cmd_stop(self,", "threading import telnetlib from Config import config, maps, admins, vetoprocesses from queue import", "!help for a full list of commands[/color]\" ) # While an exit command", "% data) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Sets a user as ready", "\"\"\" def start_gather(self): global players if len(players) == PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players are ready!", "running![/color]\") \"\"\" Print help text to channel \"\"\" def cmd_help(self, userid, user, data):", "vetoprocesses from queue import Queue from Player import Player # Amount of players", "changed to %s![/color]\" % data) else: self.sendChannelMessage(\"[color=red]%s not supported![/color]\" % data) else: self.sendChannelMessage(\"[color=red]You", "and returns the telnet client \"\"\" def initBot(self): # Connect and log in", "[ Queue(), Queue(), Queue() ] bots = [ BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"], 0), BotThread(config[\"user1\"],", "data): global gatherRunning global players p = None for x in players: if", "client \"\"\" def closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot closed\") \"\"\" Get the client ID", "telnet \"\"\" Log out from telnet and close the 
client \"\"\" def closeBot(self):", "in cmd.split() if len(x.split(\"=\")) > 1 and not x.__contains__(\"msg=ok\")] d = {} for", "if data in vetoprocesses: global vetoSystem vetoSystem = data broadcastMessage(\"[color=green]Game changed to %s![/color]\"", "encoding of strings \"\"\" def getenc(self, str): return str.encode('ascii') \"\"\" Broadcast message to", "= False for p in players: if p.uid == userid: alreadyReady = True", "self.initBot() self.botId = self.getPlayerId(self.name) self.channel = self.moveToChannel(self.getChannelId(self.channel)) # Print Welcome message self.sendChannelMessage( \"\\\\n[b]The", "= [ Queue(), Queue(), Queue() ] bots = [ BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"], 0),", "for this bot \"\"\" def getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" % name)) botstr =", "def start_gather(self): global players if len(players) == PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players are ready! Setting", "(!h)[/color] : [i]List all available commands[/i]\\\\n\" if userid in admins.keys(): string += \"\\\\n\\\\n\"", "str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr = botstr.split()[0] return int(botstr.split(\"=\")[1]) \"\"\" Get the channel ID from the", "move all players to lobby \"\"\" def cmd_stop(self, userid, user, data): global gatherRunning", "self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for", "server query! 
\"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright 2015, TS3GatherBot' __credits__ =", "def run(self): self.telnet = self.initBot() self.botId = self.getPlayerId(self.name) self.channel = self.moveToChannel(self.getChannelId(self.channel)) # Print", "self.sendChannelMessage( \"\\\\n[b]The GatherBot is currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \"[color=green]!stop[/color] :", "players: if x.uid == userid: p = x if len(data) > 1 and", "up teams![/color]\" % PLAYERS_NEEDED) l = players[:] import random random.shuffle(l) team1 = l[:int(PLAYERS_NEEDED/2)]", "\"[b]Admin Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!activate[/color] : [i]Toggle this bot[/i]\\\\n\"", "self.cmd_unready, \"!nr\": self.cmd_unready, \"!ur\": self.cmd_unready, \"!help\": self.cmd_help, \"!h\": self.cmd_help, # Admin commands \"!activate\":", "0), BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"], 1), BotThread(config[\"user2\"], config[\"pass2\"], config[\"g2\"], 2) ] for b in", "\"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright 2015, TS3GatherBot' __credits__ = ['<NAME>'] __licence__", "commands \"!activate\": self.cmd_activate } self.name = name self.password = password self.telnet = None", "cmd.index(\"=\", i1) i2 = cmd.index(\"\\\\n\", i1) userid = cmd[i1 + 1:i2] cmd =", "gather has been started by %s![/color]\" % user) else: self.sendChannelMessage(\"[color=red]A gather is already", "self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print a message to a specific channel \"\"\" def sendChannelMessage(self, msg):", "Sets a user as ready \"\"\" def cmd_ready(self, userid, user, data): global gatherRunning", "exiting thread self.closeBot() \"\"\" Connects to the teamspeak server via telnet and returns", "cmd_ready(self, userid, user, data): global gatherRunning if gatherRunning: global players alreadyReady = False", "\\ \"[color=grey]!<cmd> 
(<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!activate[/color] : [i]Toggle this bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\"", "Init the app \"\"\" active = True players = [] gatherRunning = False", "from Config import config, maps, admins, vetoprocesses from queue import Queue from Player", "= cmd.index(\"=\", i1) i2 = cmd.index(\"\\\\n\", i1) userid = cmd[i1 + 1:i2] cmd", "%s %s\\n\" % (self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\")) # Select virtual server id telnet.write(self.getenc(\"use sid=%s\\n\"", "if gatherRunning and p.isMod: gatherRunning = False global vetoSystem vetoSystem = \"bo3\" #", "print(\"Bot closed\") \"\"\" Get the client ID for this bot \"\"\" def getPlayerId(self,", "telnet and close the client \"\"\" def closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot closed\") \"\"\"", "to all bots \"\"\" def broadcastMessage(msg): for q in cmdToThread: q.put(msg) \"\"\" Init", "and p.isMod: gatherRunning = False global vetoSystem vetoSystem = \"bo3\" # Move all", "vetoSystem = \"bo3\" # Move all players to Lobby plrs = [\"clid=\" +", "one who started the gather) \"\"\" def cmd_start(self, userid, user, data): global gatherRunning", "self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players = [] broadcastMessage(\"[color=red]Gather has been stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently", "the amount of maps to play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] : [i]Sets you as ready[/i]\\\\n\"", "telnet.open(telnet.host, telnet.port) telnet.write(self.getenc(\"login %s %s\\n\" % (self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\")) # Select virtual server", "\\ \"[color=green]!unready (!notready, !nr, !ur)[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help (!h)[/color]", "(the one who started the gather) \"\"\" def cmd_start(self, userid, user, data): global", "= 
str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) # When the bot is closed, close all", "has not been issued ex = False while not ex: # Print queue'd", "gather[/i]\\\\n\" \\ \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color] : [i]Set the amount", "admins! \"\"\" def cmd_activate(self, userid, user, data): if userid in admins: global active", "been issued ex = False while not ex: # Print queue'd messages while", "process \"\"\" def start_gather(self): global players if len(players) == PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players are", "= self.initBot() self.botId = self.getPlayerId(self.name) self.channel = self.moveToChannel(self.getChannelId(self.channel)) # Print Welcome message self.sendChannelMessage(", "Print a message to a specific channel \"\"\" def sendChannelMessage(self, msg): msg =", "\"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs = [\"clid=\" + str(self.getPlayerId(x.name))", "Set up teams, move players to correct channel and start veto process \"\"\"", "via telnet and returns the telnet client \"\"\" def initBot(self): # Connect and", "has been activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot has been deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're not an admin,", "and their Queues cmdToThread = [ Queue(), Queue(), Queue() ] bots = [", "Player # Amount of players needed to start gather (even number please :))", "not x.__contains__(\"msg=ok\")] d = {} for it in cmd: d[it[0]] = it[1] global", "# Print Welcome message self.sendChannelMessage( \"\\\\n[b]The GatherBot is currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] : [i]Starts", "channel, index): super(BotThread, self).__init__() self.commands = { # User commands \"!start\": self.cmd_start, 
\"!stop\":", "plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players =", "GatherBot is currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \"[color=green]!stop[/color] : [i]Stops the", "\"\\\\n\\\\n\" \\ \"[b]Admin Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!activate[/color] : [i]Toggle", "Lobby plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in players] plrs = \"|\".join(plrs)", "server telnet = telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host, telnet.port) telnet.write(self.getenc(\"login %s %s\\n\" % (self.name, self.password)))", "maps, admins, vetoprocesses from queue import Queue from Player import Player # Amount", "self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print a message to a", "and p.isMod: data = data[1].lower() if data in vetoprocesses: global vetoSystem vetoSystem =", "self.execCommand(msg) # When the bot is closed, close all connections # before exiting", "and not x.__contains__(\"msg=ok\")] d = {} for it in cmd: d[it[0]] = it[1]", "start_gather(self): global players if len(players) == PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players are ready! 
Setting up", "currently running![/color]\") \"\"\" Print help text to channel \"\"\" def cmd_help(self, userid, user,", "to cancel it instead![/color]\") else: players.remove(p) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Print", "queue'd messages while not cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get()) # Read commands from user and execute", "Config import config, maps, admins, vetoprocesses from queue import Queue from Player import", "broadcastMessage(\"[color=red]Gather has been stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Change the amount", "__licence__ = 'MIT' __version__ = '1.0.0' __maintainer__ = '<NAME>' __status__ = 'Production' import", "\"[color=green]!ready[/color] : [i]Sets you as ready[/i]\\\\n\" \"[color=green]!unready[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \"[color=red]Please", ": [i]Sets you as unready[/i]\\\\n\\\\n\" \"[color=red]Please type !help for a full list of", "config[\"players\"] \"\"\" Bot Thread \"\"\" class BotThread(threading.Thread): def __init__(self, name, password, channel, index):", "a full list of commands[/color]\" ) # While an exit command has not", "the channel ID from the name of the channel \"\"\" def getChannelId(self, channel):", "currently running![/color]\") \"\"\" Sets a user as ready \"\"\" def cmd_ready(self, userid, user,", "players: if p.uid == userid: alreadyReady = True if not alreadyReady: players.append(Player(user, userid))", "queue import Queue from Player import Player # Amount of players needed to", "all connections # before exiting thread self.closeBot() \"\"\" Connects to the teamspeak server", "it instead![/color]\") else: players.remove(p) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Print help text", "<NAME> This bot will make it easier to set up gathers, all from", "cmd[i1 + 1:i2] cmd = [x.split(\"=\") 
for x in cmd.split() if len(x.split(\"=\")) >", "p in players: if p.uid == userid: if p.isMod: self.sendChannelMessage(\"[color=red]You can't leave your", "players: if x.uid == userid: p = x if gatherRunning and p.isMod: gatherRunning", "all from your teamspeak 3 server. The bot requires access to the Teamspeak", "The bot requires access to the Teamspeak 3 server query! \"\"\" __author__ =", ") # While an exit command has not been issued ex = False", "[x.split(\"=\") for x in cmd.split() if len(x.split(\"=\")) > 1 and not x.__contains__(\"msg=ok\")] d", "q in cmdToThread: q.put(msg) \"\"\" Init the app \"\"\" active = True players", "= [] gatherRunning = False vetoSystem = \"bo3\" # Create lists with all", "Read commands from user and execute them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\" % self.channel)) msg", "user) else: self.sendChannelMessage(\"[color=red]A gather is already running![/color]\") \"\"\" Stop the gather and move", "plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in players] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove", "3 server. The bot requires access to the Teamspeak 3 server query! 
\"\"\"", "as ready \"\"\" def cmd_ready(self, userid, user, data): global gatherRunning if gatherRunning: global", ": [i]Sets you as ready[/i]\\\\n\" \\ \"[color=green]!unready (!notready, !nr, !ur)[/color] : [i]Sets you", "= \"bo3\" # Create lists with all the bots and their Queues cmdToThread", "-*- coding: utf-8 -*- \"\"\" Created by <NAME> This bot will make it", "channelLobby = channelLobby.split()[0] return int(channelLobby.split(\"=\")[1]) \"\"\" Move user to channel \"\"\" def moveToChannel(self,", "Broadcast message to all bots \"\"\" def broadcastMessage(msg): for q in cmdToThread: q.put(msg)", "Admin commands \"!activate\": self.cmd_activate } self.name = name self.password = password self.telnet =", "Set bot nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" % (self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return telnet \"\"\" Log out", "data = data.split(\"\\\\\\\\s\") global players p = None for x in players: if", "out from telnet and close the client \"\"\" def closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot", "\\ \"[color=green]!ready (!r, !gaben)[/color] : [i]Sets you as ready[/i]\\\\n\" \\ \"[color=green]!unready (!notready, !nr,", "\"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets a player as", "= msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse and", "userid in admins: global active active = not active if active: broadcastMessage(\"[color=green]GatherBot has", "str(self.getPlayerId(x.name)) for x in team2] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs,", "config[\"gl\"], 0), BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"], 1), BotThread(config[\"user2\"], 
config[\"pass2\"], config[\"g2\"], 2) ] for b", "\"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse and execute commands sent", "in players: if x.uid == userid: p = x if gatherRunning and p.isMod:", "to game mods! \"\"\" def cmd_maps(self, userid, user, data): global gatherRunning if gatherRunning:", "p = x if len(data) > 1 and p.isMod: data = data[1].lower() if", "ready! Setting up teams![/color]\" % PLAYERS_NEEDED) l = players[:] import random random.shuffle(l) team1", "user, data): global gatherRunning if not gatherRunning: gatherRunning = True global players players.append(Player(user,", "server id telnet.write(self.getenc(\"use sid=%s\\n\" % (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) # Set bot nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\"", "cmd.split() if len(x.split(\"=\")) > 1 and not x.__contains__(\"msg=ok\")] d = {} for it", "(cmdsp[0] in self.commands and active) or d['msg'] == '!activate': self.commands[cmdsp[0]](userid, d['invokername'], d['msg']) \"\"\"", "thread self.closeBot() \"\"\" Connects to the teamspeak server via telnet and returns the", "to server telnet = telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host, telnet.port) telnet.write(self.getenc(\"login %s %s\\n\" % (self.name,", "self.sendChannelMessage(cmdToThread[self.ti].get()) # Read commands from user and execute them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\" %", "# Select virtual server id telnet.write(self.getenc(\"use sid=%s\\n\" % (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) # Set bot", "(msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print a message to a specific channel \"\"\" def sendChannelMessage(self,", "Only available to game mods! 
\"\"\" def cmd_maps(self, userid, user, data): global gatherRunning", "for p in players: if p.uid == userid: alreadyReady = True if not", "and set mod (the one who started the gather) \"\"\" def cmd_start(self, userid,", "and start veto process \"\"\" def start_gather(self): global players if len(players) == PLAYERS_NEEDED:", "[i]Set the amount of maps to play (default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready (!r, !gaben)[/color] :", "in cmdToThread: q.put(msg) \"\"\" Init the app \"\"\" active = True players =", "password self.telnet = None self.botId = None self.channel = channel self.ti = index", "to the teamspeak server via telnet and returns the telnet client \"\"\" def", "self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse and execute commands sent by", "\\ \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color] : [i]Set the amount of", "\", \"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" % (channelLobby))) channelLobby = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby = channelLobby.split(\"\\\\n\")[1] channelLobby", "l = players[:] import random random.shuffle(l) team1 = l[:int(PLAYERS_NEEDED/2)] team2 = l[int(PLAYERS_NEEDED/2):] plrs", "str.encode('ascii') \"\"\" Broadcast message to all bots \"\"\" def broadcastMessage(msg): for q in", "gatherRunning: gatherRunning = True global players players.append(Player(user, userid, True)) broadcastMessage(\"[color=green]A gather has been", "%s![/color]\" % data) else: self.sendChannelMessage(\"[color=red]%s not supported![/color]\" % data) else: self.sendChannelMessage(\"[color=red]You didn't enter", "(even number please :)) PLAYERS_NEEDED = config[\"players\"] \"\"\" Bot Thread \"\"\" class BotThread(threading.Thread):", "botstr = botstr.split()[0] return int(botstr.split(\"=\")[1]) \"\"\" Get the channel ID from the name", "\"\"\" def 
closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot closed\") \"\"\" Get the client ID for", "data) else: self.sendChannelMessage(\"[color=red]%s not supported![/color]\" % data) else: self.sendChannelMessage(\"[color=red]You didn't enter a value", "a user as ready \"\"\" def cmd_ready(self, userid, user, data): global gatherRunning if", "# Connect and log in to server telnet = telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host, telnet.port)", "teamspeak 3 server. The bot requires access to the Teamspeak 3 server query!", "= config[\"players\"] \"\"\" Bot Thread \"\"\" class BotThread(threading.Thread): def __init__(self, name, password, channel,", "vetoprocesses: global vetoSystem vetoSystem = data broadcastMessage(\"[color=green]Game changed to %s![/color]\" % data) else:", "of the channel \"\"\" def getChannelId(self, channel): channelLobby = channel.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"channelfind", "(self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return telnet \"\"\" Log out from telnet and close the client", "in players: if x.uid == userid: p = x if len(data) > 1", "and execute commands sent by users \"\"\" def execCommand(self, cmd): i1 = cmd.index(\"invokeruid\")", "self.password = password self.telnet = None self.botId = None self.channel = channel self.ti", "(config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) # Set bot nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" % (self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return telnet", "= ['<NAME>'] __licence__ = 'MIT' __version__ = '1.0.0' __maintainer__ = '<NAME>' __status__ =", "ready![/color]\" % user) self.start_gather() else: self.sendChannelMessage(\"[color=red]You're already ready![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\")", "in players: if p.uid == userid: if p.isMod: self.sendChannelMessage(\"[color=red]You 
can't leave your own", "\"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print a message to", "played Only available to game mods! \"\"\" def cmd_maps(self, userid, user, data): global", "cid=%s\\n\" % (plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players = [] broadcastMessage(\"[color=red]Gather has been stopped![/color]\")", "the game mod![/color]\" % data) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Sets a", "amount of maps to play (default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready (!r, !gaben)[/color] : [i]Sets you", "Print Welcome message self.sendChannelMessage( \"\\\\n[b]The GatherBot is currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] : [i]Starts a", "else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Change the amount of maps that will", "players] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players", "\"!help\": self.cmd_help, \"!h\": self.cmd_help, # Admin commands \"!activate\": self.cmd_activate } self.name = name", "if p.isMod: self.sendChannelMessage(\"[color=red]You can't leave your own gather. Use !stop to cancel it", "p.isMod: self.sendChannelMessage(\"[color=red]You can't leave your own gather. 
Use !stop to cancel it instead![/color]\")", "of players needed to start gather (even number please :)) PLAYERS_NEEDED = config[\"players\"]", "return int(botstr.split(\"=\")[1]) \"\"\" Get the channel ID from the name of the channel", "telnetlib from Config import config, maps, admins, vetoprocesses from queue import Queue from", "% (self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return telnet \"\"\" Log out from telnet and close the", "\"\"\" Get the channel ID from the name of the channel \"\"\" def", "msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse and execute", "id=%s\\n\" % self.channel)) msg = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) # When the bot", "Queue from Player import Player # Amount of players needed to start gather", "'!activate': self.commands[cmdsp[0]](userid, d['invokername'], d['msg']) \"\"\" Start gather and set mod (the one who", "def sendChannelMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\" % (msg)))", "# Move all players to Lobby plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x", "== userid: alreadyReady = True if not alreadyReady: players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s is ready![/color]\"", "of maps to play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] : [i]Sets you as ready[/i]\\\\n\" \"[color=green]!unready[/color] :", "[i]Sets you as unready[/i]\\\\n\\\\n\" \"[color=red]Please type !help for a full list of commands[/color]\"", "[ BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"], 0), BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"], 1), BotThread(config[\"user2\"], config[\"pass2\"], config[\"g2\"], 2)", "return int(channelLobby.split(\"=\")[1]) \"\"\" Move user to channel 
\"\"\" def moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove clid=%s", "initBot(self): # Connect and log in to server telnet = telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host,", "closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot closed\") \"\"\" Get the client ID for this bot", "channel \"\"\" def moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\" % (self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return", "= l[int(PLAYERS_NEEDED/2):] plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in team1] plrs =", "\"!h\": self.cmd_help, # Admin commands \"!activate\": self.cmd_activate } self.name = name self.password =", "data) else: self.sendChannelMessage(\"[color=red]You didn't enter a value or you're not the game mod![/color]\"", "[i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \\ \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\"", "if userid in admins: global active active = not active if active: broadcastMessage(\"[color=green]GatherBot", "if gatherRunning: global players alreadyReady = False for p in players: if p.uid", "gatherRunning = True global players players.append(Player(user, userid, True)) broadcastMessage(\"[color=green]A gather has been started", "\"[color=green]!activate[/color] : [i]Toggle this bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\" Toggle whether the bot is activated", "+ str(self.getPlayerId(x.name)) for x in team1] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" %", "plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in team1] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove", "cmd_activate(self, userid, user, data): if userid in admins: global active active = not", "self.telnet = None self.botId = None self.channel = channel self.ti = index 
def", "self.getPlayerId(self.name) self.channel = self.moveToChannel(self.getChannelId(self.channel)) # Print Welcome message self.sendChannelMessage( \"\\\\n[b]The GatherBot is currently", "False global vetoSystem vetoSystem = \"bo3\" # Move all players to Lobby plrs", "you as ready[/i]\\\\n\" \\ \"[color=green]!unready (!notready, !nr, !ur)[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\"", "available to admins! \"\"\" def cmd_activate(self, userid, user, data): if userid in admins:", "\"\"\" def initBot(self): # Connect and log in to server telnet = telnetlib.Telnet(config[\"host\"],", "this bot \"\"\" def getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" % name)) botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\")))", "your teamspeak 3 server. The bot requires access to the Teamspeak 3 server", "virtual server id telnet.write(self.getenc(\"use sid=%s\\n\" % (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) # Set bot nickname telnet.write(self.getenc(\"clientupdate", "p = None for x in players: if x.uid == userid: p =", "\"\"\" Created by <NAME> This bot will make it easier to set up", "\"\"\" Print help text to channel \"\"\" def cmd_help(self, userid, user, data): string", "self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Set up teams, move players to correct channel", "gather[/i]\\\\n\" \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] : [i]Set the amount of maps", "text to channel \"\"\" def cmd_help(self, userid, user, data): string = \"\\\\n[b]Available commands", "commands[/i]\\\\n\" if userid in admins.keys(): string += \"\\\\n\\\\n\" \\ \"[b]Admin Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd>", "\"\"\" Get the client ID for this bot \"\"\" def getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind", "unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help (!h)[/color] : [i]List all available commands[/i]\\\\n\" if 
userid in admins.keys():", "1:i2] cmd = [x.split(\"=\") for x in cmd.split() if len(x.split(\"=\")) > 1 and", "(plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players = [] broadcastMessage(\"[color=red]Gather has been stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No", "> 1 and p.isMod: data = data[1].lower() if data in vetoprocesses: global vetoSystem", "import config, maps, admins, vetoprocesses from queue import Queue from Player import Player", "return str.encode('ascii') \"\"\" Broadcast message to all bots \"\"\" def broadcastMessage(msg): for q", "gather currently running![/color]\") \"\"\" Sets a user as ready \"\"\" def cmd_ready(self, userid,", "p in players: if p.uid == userid: alreadyReady = True if not alreadyReady:", "gather (even number please :)) PLAYERS_NEEDED = config[\"players\"] \"\"\" Bot Thread \"\"\" class", "running![/color]\") \"\"\" Set up teams, move players to correct channel and start veto", "the bot is closed, close all connections # before exiting thread self.closeBot() \"\"\"", "def closeBot(self): self.telnet.write(self.getenc(\"logout\\n\")) self.telnet.close() print(\"Bot closed\") \"\"\" Get the client ID for this", "= cmd.index(\"invokeruid\") i1 = cmd.index(\"=\", i1) i2 = cmd.index(\"\\\\n\", i1) userid = cmd[i1", "= channel.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" % (channelLobby))) channelLobby = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby =", "\"\"\" def cmd_help(self, userid, user, data): string = \"\\\\n[b]Available commands are:[/b]\\\\n\" \\ \"[color=grey]!<cmd>", "supported![/color]\" % data) else: self.sendChannelMessage(\"[color=red]You didn't enter a value or you're not the", "a player as not ready \"\"\" def cmd_unready(self, userid, user, data): global gatherRunning", "all players to lobby \"\"\" def cmd_stop(self, userid, user, data): global gatherRunning global", "= 
self.getPlayerId(self.name) self.channel = self.moveToChannel(self.getChannelId(self.channel)) # Print Welcome message self.sendChannelMessage( \"\\\\n[b]The GatherBot is", "userid, user, data): string = \"\\\\n[b]Available commands are:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\"", "\"\\\\n[b]The GatherBot is currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \"[color=green]!stop[/color] : [i]Stops", "mod![/color]\" % data) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Sets a user as", "ID from the name of the channel \"\"\" def getChannelId(self, channel): channelLobby =", "class BotThread(threading.Thread): def __init__(self, name, password, channel, index): super(BotThread, self).__init__() self.commands = {", "\"!ur\": self.cmd_unready, \"!help\": self.cmd_help, \"!h\": self.cmd_help, # Admin commands \"!activate\": self.cmd_activate } self.name", "password, channel, index): super(BotThread, self).__init__() self.commands = { # User commands \"!start\": self.cmd_start,", "query! \"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright 2015, TS3GatherBot' __credits__ = ['<NAME>']", "not supported![/color]\" % data) else: self.sendChannelMessage(\"[color=red]You didn't enter a value or you're not", "self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Sets a user as ready \"\"\" def cmd_ready(self,", "exit command has not been issued ex = False while not ex: #", "\"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" % (channelLobby))) channelLobby = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby = channelLobby.split(\"\\\\n\")[1] channelLobby =", "up gathers, all from your teamspeak 3 server. 
The bot requires access to", "%s cid=%s\\n\" % (plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets a player as not ready", "activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot has been deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're not an admin, GTFO![/color]\") \"\"\"", "p.uid == userid: alreadyReady = True if not alreadyReady: players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s is", "% (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print a message to a specific channel \"\"\" def", "= password self.telnet = None self.botId = None self.channel = channel self.ti =", "self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Print help text to channel \"\"\" def cmd_help(self,", "int(channelLobby.split(\"=\")[1]) \"\"\" Move user to channel \"\"\" def moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\"", "you as ready[/i]\\\\n\" \"[color=green]!unready[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \"[color=red]Please type !help for", "execute them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\" % self.channel)) msg = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg)", "self.commands = { # User commands \"!start\": self.cmd_start, \"!stop\": self.cmd_stop, \"!maps\": self.cmd_maps, \"!ready\":", "else: self.sendChannelMessage(\"[color=red]%s not supported![/color]\" % data) else: self.sendChannelMessage(\"[color=red]You didn't enter a value or", "them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\" % self.channel)) msg = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) #", "correct channel and start veto process \"\"\" def start_gather(self): global players if 
len(players)", "\"\"\" def execCommand(self, cmd): i1 = cmd.index(\"invokeruid\") i1 = cmd.index(\"=\", i1) i2 =", "(default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] : [i]Sets you as ready[/i]\\\\n\" \"[color=green]!unready[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\"", "userid, user, data): global gatherRunning global players p = None for x in", "data in vetoprocesses: global vetoSystem vetoSystem = data broadcastMessage(\"[color=green]Game changed to %s![/color]\" %", "needed to start gather (even number please :)) PLAYERS_NEEDED = config[\"players\"] \"\"\" Bot", "False while not ex: # Print queue'd messages while not cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get()) #", "\"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!activate[/color] : [i]Toggle this bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\" Toggle", "global players p = None for x in players: if x.uid == userid:", "= not active if active: broadcastMessage(\"[color=green]GatherBot has been activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot has been", "% (channelLobby))) channelLobby = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby = channelLobby.split(\"\\\\n\")[1] channelLobby = channelLobby.split()[0] return int(channelLobby.split(\"=\")[1])", "clid=%s cid=%s\\n\" % (self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return channel \"\"\" Print out a server", "broadcastMessage(\"[color=red]GatherBot has been deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're not an admin, GTFO![/color]\") \"\"\" Fix encoding", "global vetoSystem vetoSystem = \"bo3\" # Move all players to Lobby plrs =", "gatherRunning: data = data.split(\"\\\\\\\\s\") global players p = None for x in players:", "!nr, !ur)[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help (!h)[/color] : [i]List all", "\"\"\" def getChannelId(self, channel): 
channelLobby = channel.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" % (channelLobby)))", "returns the telnet client \"\"\" def initBot(self): # Connect and log in to", "str): return str.encode('ascii') \"\"\" Broadcast message to all bots \"\"\" def broadcastMessage(msg): for", "userid: if p.isMod: self.sendChannelMessage(\"[color=red]You can't leave your own gather. Use !stop to cancel", "Setting up teams![/color]\" % PLAYERS_NEEDED) l = players[:] import random random.shuffle(l) team1 =", "is activated Only available to admins! \"\"\" def cmd_activate(self, userid, user, data): if", "all the bots and their Queues cmdToThread = [ Queue(), Queue(), Queue() ]", "def cmd_unready(self, userid, user, data): global gatherRunning if gatherRunning: global players for p", "bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\" Toggle whether the bot is activated Only available to admins!", "to admins! \"\"\" def cmd_activate(self, userid, user, data): if userid in admins: global", "['<NAME>'] __licence__ = 'MIT' __version__ = '1.0.0' __maintainer__ = '<NAME>' __status__ = 'Production'", "i1) i2 = cmd.index(\"\\\\n\", i1) userid = cmd[i1 + 1:i2] cmd = [x.split(\"=\")", "\"[color=green]!ready (!r, !gaben)[/color] : [i]Sets you as ready[/i]\\\\n\" \\ \"[color=green]!unready (!notready, !nr, !ur)[/color]", "= cmd.index(\"\\\\n\", i1) userid = cmd[i1 + 1:i2] cmd = [x.split(\"=\") for x", "\"\"\" Start gather and set mod (the one who started the gather) \"\"\"", "self.cmd_start, \"!stop\": self.cmd_stop, \"!maps\": self.cmd_maps, \"!ready\": self.cmd_ready, \"!r\": self.cmd_ready, \"!gaben\": self.cmd_ready, \"!unready\": self.cmd_unready,", "players.append(Player(user, userid, True)) broadcastMessage(\"[color=green]A gather has been started by %s![/color]\" % user) else:", "targetmode=2 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse and execute commands sent by users", "\"\"\" def broadcastMessage(msg): 
for q in cmdToThread: q.put(msg) \"\"\" Init the app \"\"\"", "teamspeak server via telnet and returns the telnet client \"\"\" def initBot(self): #", "= \"bo3\" # Move all players to Lobby plrs = [\"clid=\" + str(self.getPlayerId(x.name))", "(default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready (!r, !gaben)[/color] : [i]Sets you as ready[/i]\\\\n\" \\ \"[color=green]!unready (!notready,", "BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"], 0), BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"], 1), BotThread(config[\"user2\"], config[\"pass2\"], config[\"g2\"], 2) ]", "name, password, channel, index): super(BotThread, self).__init__() self.commands = { # User commands \"!start\":", "{ # User commands \"!start\": self.cmd_start, \"!stop\": self.cmd_stop, \"!maps\": self.cmd_maps, \"!ready\": self.cmd_ready, \"!r\":", "telnet client \"\"\" def initBot(self): # Connect and log in to server telnet", "gatherRunning if not gatherRunning: gatherRunning = True global players players.append(Player(user, userid, True)) broadcastMessage(\"[color=green]A", "% data) else: self.sendChannelMessage(\"[color=red]You didn't enter a value or you're not the game", "you as unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help (!h)[/color] : [i]List all available commands[/i]\\\\n\" if userid", "app \"\"\" active = True players = [] gatherRunning = False vetoSystem =", "user as ready \"\"\" def cmd_ready(self, userid, user, data): global gatherRunning if gatherRunning:", "as ready[/i]\\\\n\" \\ \"[color=green]!unready (!notready, !nr, !ur)[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \\", "players to Lobby plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in players] plrs", "= botstr.split()[0] return int(botstr.split(\"=\")[1]) \"\"\" Get the channel ID from the name of", "[\"clid=\" + str(self.getPlayerId(x.name)) for x in team2] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\"", "\"[color=green]!maps[/color] : [i]Set the amount 
of maps to play (default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready (!r,", "if len(x.split(\"=\")) > 1 and not x.__contains__(\"msg=ok\")] d = {} for it in", "not active if active: broadcastMessage(\"[color=green]GatherBot has been activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot has been deactivated[/color]\")", "x.__contains__(\"msg=ok\")] d = {} for it in cmd: d[it[0]] = it[1] global active", "channel \"\"\" Print out a server message \"\"\" def sendServerMessage(self, msg): msg =", "Start gather and set mod (the one who started the gather) \"\"\" def", "% (self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return channel \"\"\" Print out a server message \"\"\"", "else: self.sendChannelMessage(\"[color=red]A gather is already running![/color]\") \"\"\" Stop the gather and move all", "(<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \\ \"[color=green]!stop[/color] : [i]Stops", "Queue(), Queue() ] bots = [ BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"], 0), BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"],", "= str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr = botstr.split()[0] return int(botstr.split(\"=\")[1]) \"\"\" Get the channel ID from", "the channel \"\"\" def getChannelId(self, channel): channelLobby = channel.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\"", ": [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \\ \"[color=green]!stop[/color] : [i]Stops the", "global active active = not active if active: broadcastMessage(\"[color=green]GatherBot has been activated[/color]\") else:", "userid, user, data): global gatherRunning if not gatherRunning: gatherRunning = True global players", "i1 = cmd.index(\"invokeruid\") i1 = cmd.index(\"=\", i1) i2 = cmd.index(\"\\\\n\", i1) userid =", "\"\"\" Broadcast message to all bots 
\"\"\" def broadcastMessage(msg): for q in cmdToThread:", "for x in players: if x.uid == userid: p = x if gatherRunning", "message to all bots \"\"\" def broadcastMessage(msg): for q in cmdToThread: q.put(msg) \"\"\"", "= d['msg'].split(\"\\\\\\\\s\") if (cmdsp[0] in self.commands and active) or d['msg'] == '!activate': self.commands[cmdsp[0]](userid,", "__copyright__ = 'Copyright 2015, TS3GatherBot' __credits__ = ['<NAME>'] __licence__ = 'MIT' __version__ =", "\"!gaben\": self.cmd_ready, \"!unready\": self.cmd_unready, \"!notready\": self.cmd_unready, \"!nr\": self.cmd_unready, \"!ur\": self.cmd_unready, \"!help\": self.cmd_help, \"!h\":", "sid=%s\\n\" % (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\")) # Set bot nickname telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" % (self.name))) telnet.read_until(self.getenc(\"msg=ok\"))", "moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\" % (self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return channel \"\"\" Print", "requires access to the Teamspeak 3 server query! \"\"\" __author__ = '<NAME>' __copyright__", "easier to set up gathers, all from your teamspeak 3 server. 
The bot", "% user) else: self.sendChannelMessage(\"[color=red]A gather is already running![/color]\") \"\"\" Stop the gather and", "didn't enter a value or you're not the game mod![/color]\" % data) else:", "Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!activate[/color] : [i]Toggle this bot[/i]\\\\n\" self.sendChannelMessage(string)", "msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\"", "gather and set mod (the one who started the gather) \"\"\" def cmd_start(self,", "self.name = name self.password = password self.telnet = None self.botId = None self.channel", "in players] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3)", "msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) # When the bot is closed, close all connections # before", ": [i]Sets you as ready[/i]\\\\n\" \"[color=green]!unready[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \"[color=red]Please type", "if not gatherRunning: gatherRunning = True global players players.append(Player(user, userid, True)) broadcastMessage(\"[color=green]A gather", "the bots and their Queues cmdToThread = [ Queue(), Queue(), Queue() ] bots", "the client ID for this bot \"\"\" def getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" %", "> 1 and not x.__contains__(\"msg=ok\")] d = {} for it in cmd: d[it[0]]", "to start gather (even number please :)) PLAYERS_NEEDED = config[\"players\"] \"\"\" Bot Thread", "Get the client ID for this bot \"\"\" def getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\"", "move players to correct channel and start veto process \"\"\" def start_gather(self): global", "channel \"\"\" def 
cmd_help(self, userid, user, data): string = \"\\\\n[b]Available commands are:[/b]\\\\n\" \\", "maps to play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] : [i]Sets you as ready[/i]\\\\n\" \"[color=green]!unready[/color] : [i]Sets", "+= \"\\\\n\\\\n\" \\ \"[b]Admin Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!activate[/color] :", "self.sendChannelMessage(\"[color=red]%s not supported![/color]\" % data) else: self.sendChannelMessage(\"[color=red]You didn't enter a value or you're", "server via telnet and returns the telnet client \"\"\" def initBot(self): # Connect", "def getenc(self, str): return str.encode('ascii') \"\"\" Broadcast message to all bots \"\"\" def", "user and execute them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\" % self.channel)) msg = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if", "import Player # Amount of players needed to start gather (even number please", "(self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return channel \"\"\" Print out a server message \"\"\" def", "target=1 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print a message to a specific channel", "This bot will make it easier to set up gathers, all from your", "= False global vetoSystem vetoSystem = \"bo3\" # Move all players to Lobby", "'1.0.0' __maintainer__ = '<NAME>' __status__ = 'Production' import threading import telnetlib from Config", "[\"clid=\" + str(self.getPlayerId(x.name)) for x in team1] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\"", "a gather[/i]\\\\n\" \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] : [i]Set the amount of", "Print out a server message \"\"\" def sendServerMessage(self, msg): msg = msg.replace(\" \",", "[i]Toggle this bot[/i]\\\\n\" self.sendChannelMessage(string) \"\"\" Toggle 
whether the bot is activated Only available", "self.telnet.close() print(\"Bot closed\") \"\"\" Get the client ID for this bot \"\"\" def", "else: broadcastMessage(\"[color=red]GatherBot has been deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're not an admin, GTFO![/color]\") \"\"\" Fix", "= True players = [] gatherRunning = False vetoSystem = \"bo3\" # Create", "msg = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) # When the bot is closed, close", "ready \"\"\" def cmd_unready(self, userid, user, data): global gatherRunning if gatherRunning: global players", "def moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\" % (self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return channel \"\"\"", "for x in cmd.split() if len(x.split(\"=\")) > 1 and not x.__contains__(\"msg=ok\")] d =", "userid, user, data): global gatherRunning if gatherRunning: data = data.split(\"\\\\\\\\s\") global players p", "= '<NAME>' __status__ = 'Production' import threading import telnetlib from Config import config,", "\"!activate\": self.cmd_activate } self.name = name self.password = password self.telnet = None self.botId", "before exiting thread self.closeBot() \"\"\" Connects to the teamspeak server via telnet and", "self.botId = self.getPlayerId(self.name) self.channel = self.moveToChannel(self.getChannelId(self.channel)) # Print Welcome message self.sendChannelMessage( \"\\\\n[b]The GatherBot", "in to server telnet = telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host, telnet.port) telnet.write(self.getenc(\"login %s %s\\n\" %", "def execCommand(self, cmd): i1 = cmd.index(\"invokeruid\") i1 = cmd.index(\"=\", i1) i2 = cmd.index(\"\\\\n\",", "\"\"\" Sets a player as not ready \"\"\" def cmd_unready(self, userid, user, data):", "activated Only available to admins! 
\"\"\" def cmd_activate(self, userid, user, data): if userid", "global players alreadyReady = False for p in players: if p.uid == userid:", "will be played Only available to game mods! \"\"\" def cmd_maps(self, userid, user,", "an admin, GTFO![/color]\") \"\"\" Fix encoding of strings \"\"\" def getenc(self, str): return", "super(BotThread, self).__init__() self.commands = { # User commands \"!start\": self.cmd_start, \"!stop\": self.cmd_stop, \"!maps\":", "\"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] : [i]Set the amount of maps to", "Queue(), Queue(), Queue() ] bots = [ BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"], 0), BotThread(config[\"user1\"], config[\"pass1\"],", "config[\"port\"]) telnet.open(telnet.host, telnet.port) telnet.write(self.getenc(\"login %s %s\\n\" % (self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\")) # Select virtual", "players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s is ready![/color]\" % user) self.start_gather() else: self.sendChannelMessage(\"[color=red]You're already ready![/color]\") else:", "== userid: p = x if gatherRunning and p.isMod: gatherRunning = False global", "\"[color=green]!unready (!notready, !nr, !ur)[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help (!h)[/color] :", ": [i]List all available commands[/i]\\\\n\" if userid in admins.keys(): string += \"\\\\n\\\\n\" \\", "the app \"\"\" active = True players = [] gatherRunning = False vetoSystem", "random.shuffle(l) team1 = l[:int(PLAYERS_NEEDED/2)] team2 = l[int(PLAYERS_NEEDED/2):] plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for", "= it[1] global active cmdsp = d['msg'].split(\"\\\\\\\\s\") if (cmdsp[0] in self.commands and active)", "\"\"\" Log out from telnet and close the client \"\"\" def closeBot(self): self.telnet.write(self.getenc(\"logout\\n\"))", "as not ready \"\"\" def cmd_unready(self, userid, user, data): global gatherRunning 
if gatherRunning:", "message self.sendChannelMessage( \"\\\\n[b]The GatherBot is currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \"[color=green]!stop[/color]", "def cmd_start(self, userid, user, data): global gatherRunning if not gatherRunning: gatherRunning = True", "= { # User commands \"!start\": self.cmd_start, \"!stop\": self.cmd_stop, \"!maps\": self.cmd_maps, \"!ready\": self.cmd_ready,", "started the gather) \"\"\" def cmd_start(self, userid, user, data): global gatherRunning if not", "teams, move players to correct channel and start veto process \"\"\" def start_gather(self):", "\"\"\" Change the amount of maps that will be played Only available to", "if not alreadyReady: players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s is ready![/color]\" % user) self.start_gather() else: self.sendChannelMessage(\"[color=red]You're", "to a specific channel \"\"\" def sendChannelMessage(self, msg): msg = msg.replace(\" \", \"\\s\")", "players = [] gatherRunning = False vetoSystem = \"bo3\" # Create lists with", "__status__ = 'Production' import threading import telnetlib from Config import config, maps, admins,", "\"[color=green]!unready[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \"[color=red]Please type !help for a full list", "= data.split(\"\\\\\\\\s\") global players p = None for x in players: if x.uid", "event=textchannel id=%s\\n\" % self.channel)) msg = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) # When the", "2015, TS3GatherBot' __credits__ = ['<NAME>'] __licence__ = 'MIT' __version__ = '1.0.0' __maintainer__ =", "user, data): global gatherRunning if gatherRunning: global players for p in players: if", "def broadcastMessage(msg): for q in cmdToThread: q.put(msg) \"\"\" Init the app \"\"\" active", "\"!stop\": self.cmd_stop, \"!maps\": self.cmd_maps, \"!ready\": self.cmd_ready, \"!r\": self.cmd_ready, \"!gaben\": 
self.cmd_ready, \"!unready\": self.cmd_unready, \"!notready\":", "None self.channel = channel self.ti = index def run(self): self.telnet = self.initBot() self.botId", "channel): channelLobby = channel.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" % (channelLobby))) channelLobby = str(self.telnet.read_until(self.getenc(\"msg=ok\")))", "gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color] : [i]Set the amount of maps to play (default=bo3)[/i]\\\\n\" \\", "self.telnet = self.initBot() self.botId = self.getPlayerId(self.name) self.channel = self.moveToChannel(self.getChannelId(self.channel)) # Print Welcome message", "data) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Sets a user as ready \"\"\"", "admins, vetoprocesses from queue import Queue from Player import Player # Amount of", "= l[:int(PLAYERS_NEEDED/2)] team2 = l[int(PLAYERS_NEEDED/2):] plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in", "running![/color]\") \"\"\" Sets a user as ready \"\"\" def cmd_ready(self, userid, user, data):", "index def run(self): self.telnet = self.initBot() self.botId = self.getPlayerId(self.name) self.channel = self.moveToChannel(self.getChannelId(self.channel)) #", "targetmode=3 target=1 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print a message to a specific", "of maps that will be played Only available to game mods! 
\"\"\" def", "\"bo3\" # Create lists with all the bots and their Queues cmdToThread =", "\"!ready\": self.cmd_ready, \"!r\": self.cmd_ready, \"!gaben\": self.cmd_ready, \"!unready\": self.cmd_unready, \"!notready\": self.cmd_unready, \"!nr\": self.cmd_unready, \"!ur\":", "gatherRunning if gatherRunning: global players for p in players: if p.uid == userid:", "while not cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get()) # Read commands from user and execute them self.telnet.write(self.getenc(\"servernotifyregister", "__init__(self, name, password, channel, index): super(BotThread, self).__init__() self.commands = { # User commands", "set up gathers, all from your teamspeak 3 server. The bot requires access", "the Teamspeak 3 server query! \"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright 2015,", "amount of maps to play (default=bo3)[/i]\\\\n\" \"[color=green]!ready[/color] : [i]Sets you as ready[/i]\\\\n\" \"[color=green]!unready[/color]", "cmd.index(\"\\\\n\", i1) userid = cmd[i1 + 1:i2] cmd = [x.split(\"=\") for x in", "def getChannelId(self, channel): channelLobby = channel.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" % (channelLobby))) channelLobby", "\"\"\" class BotThread(threading.Thread): def __init__(self, name, password, channel, index): super(BotThread, self).__init__() self.commands =", "a message to a specific channel \"\"\" def sendChannelMessage(self, msg): msg = msg.replace(\"", "(!r, !gaben)[/color] : [i]Sets you as ready[/i]\\\\n\" \\ \"[color=green]!unready (!notready, !nr, !ur)[/color] :", "= msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Print", "\"!start\": self.cmd_start, \"!stop\": self.cmd_stop, \"!maps\": self.cmd_maps, \"!ready\": self.cmd_ready, \"!r\": self.cmd_ready, \"!gaben\": self.cmd_ready, \"!unready\":", "x in players] plrs = 
\"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"),", "= self.moveToChannel(self.getChannelId(self.channel)) # Print Welcome message self.sendChannelMessage( \"\\\\n[b]The GatherBot is currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color]", "a specific channel \"\"\" def sendChannelMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage", "of strings \"\"\" def getenc(self, str): return str.encode('ascii') \"\"\" Broadcast message to all", "broadcastMessage(\"[color=green]%s players are ready! Setting up teams![/color]\" % PLAYERS_NEEDED) l = players[:] import", "= channel self.ti = index def run(self): self.telnet = self.initBot() self.botId = self.getPlayerId(self.name)", "self.sendChannelMessage(\"[color=red]You can't leave your own gather. Use !stop to cancel it instead![/color]\") else:", "channel): self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\" % (self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\")) return channel \"\"\" Print out", "commands are:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\"", "leave your own gather. Use !stop to cancel it instead![/color]\") else: players.remove(p) else:", "[i]Starts a gather[/i]\\\\n\" \\ \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color] : [i]Set", "= x if gatherRunning and p.isMod: gatherRunning = False global vetoSystem vetoSystem =", "[i]Stops the gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color] : [i]Set the amount of maps to play", "Queue() ] bots = [ BotThread(config[\"user\"], config[\"pass\"], config[\"gl\"], 0), BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"], 1),", "it easier to set up gathers, all from your teamspeak 3 server. 
The", "broadcastMessage(\"[color=green]GatherBot has been activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot has been deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're not an", "with all the bots and their Queues cmdToThread = [ Queue(), Queue(), Queue()", "} self.name = name self.password = password self.telnet = None self.botId = None", "% (plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players = [] broadcastMessage(\"[color=red]Gather has been stopped![/color]\") else:", "userid = cmd[i1 + 1:i2] cmd = [x.split(\"=\") for x in cmd.split() if", "lobby \"\"\" def cmd_stop(self, userid, user, data): global gatherRunning global players p =", "self.sendChannelMessage(\"[color=red]You're already ready![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Set up teams, move", "True players = [] gatherRunning = False vetoSystem = \"bo3\" # Create lists", "sendChannelMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\"))", "else: self.sendChannelMessage(\"[color=red]You're not an admin, GTFO![/color]\") \"\"\" Fix encoding of strings \"\"\" def", "and log in to server telnet = telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host, telnet.port) telnet.write(self.getenc(\"login %s", "gather and move all players to lobby \"\"\" def cmd_stop(self, userid, user, data):", "def cmd_help(self, userid, user, data): string = \"\\\\n[b]Available commands are:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>)", "self.sendChannelMessage(\"[color=red]You didn't enter a value or you're not the game mod![/color]\" % data)", "client \"\"\" def initBot(self): # Connect and log in to server telnet =", "start veto process \"\"\" def start_gather(self): global players if len(players) == PLAYERS_NEEDED: 
broadcastMessage(\"[color=green]%s", "in self.commands and active) or d['msg'] == '!activate': self.commands[cmdsp[0]](userid, d['invokername'], d['msg']) \"\"\" Start", "ready \"\"\" def cmd_ready(self, userid, user, data): global gatherRunning if gatherRunning: global players", "botstr.split()[0] return int(botstr.split(\"=\")[1]) \"\"\" Get the channel ID from the name of the", "is already running![/color]\") \"\"\" Stop the gather and move all players to lobby", "\"!r\": self.cmd_ready, \"!gaben\": self.cmd_ready, \"!unready\": self.cmd_unready, \"!notready\": self.cmd_unready, \"!nr\": self.cmd_unready, \"!ur\": self.cmd_unready, \"!help\":", "bot is closed, close all connections # before exiting thread self.closeBot() \"\"\" Connects", "the bot is activated Only available to admins! \"\"\" def cmd_activate(self, userid, user,", "\"\"\" def cmd_start(self, userid, user, data): global gatherRunning if not gatherRunning: gatherRunning =", "msg=%s\\n\" % (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse and execute commands sent by users \"\"\"", "running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] :", "for x in team1] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g1']))))", "and active) or d['msg'] == '!activate': self.commands[cmdsp[0]](userid, d['invokername'], d['msg']) \"\"\" Start gather and", "self.telnet.read_until(self.getenc(\"msg=ok\")) plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in team2] plrs = \"|\".join(plrs)", "is closed, close all connections # before exiting thread self.closeBot() \"\"\" Connects to", "# When the bot is closed, close all connections # before exiting thread", "string += \"\\\\n\\\\n\" \\ \"[b]Admin Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) : [i]<Description>[/i][/color]\\\\n\\\\n\" \\ 
\"[color=green]!activate[/color]", "l[int(PLAYERS_NEEDED/2):] plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in team1] plrs = \"|\".join(plrs)", "import telnetlib from Config import config, maps, admins, vetoprocesses from queue import Queue", "% (msg))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse and execute commands sent by users \"\"\" def", "= data[1].lower() if data in vetoprocesses: global vetoSystem vetoSystem = data broadcastMessage(\"[color=green]Game changed", "user, data): if userid in admins: global active active = not active if", "telnet.write(self.getenc(\"clientupdate client_nickname=%s\\n\" % (self.name))) telnet.read_until(self.getenc(\"msg=ok\")) return telnet \"\"\" Log out from telnet and", "your own gather. Use !stop to cancel it instead![/color]\") else: players.remove(p) else: self.sendChannelMessage(\"[color=red]No", "self.telnet.read_until(self.getenc(\"msg=ok\")) return channel \"\"\" Print out a server message \"\"\" def sendServerMessage(self, msg):", "vetoSystem vetoSystem = data broadcastMessage(\"[color=green]Game changed to %s![/color]\" % data) else: self.sendChannelMessage(\"[color=red]%s not", "= data broadcastMessage(\"[color=green]Game changed to %s![/color]\" % data) else: self.sendChannelMessage(\"[color=red]%s not supported![/color]\" %", "return telnet \"\"\" Log out from telnet and close the client \"\"\" def", "None self.botId = None self.channel = channel self.ti = index def run(self): self.telnet", "not cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get()) # Read commands from user and execute them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel", "alreadyReady: players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s is ready![/color]\" % user) self.start_gather() else: self.sendChannelMessage(\"[color=red]You're already ready![/color]\")", "def getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" % 
name)) botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr = botstr.split()[0]", "cmd_start(self, userid, user, data): global gatherRunning if not gatherRunning: gatherRunning = True global", "gatherRunning if gatherRunning: global players alreadyReady = False for p in players: if", "= \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs = [\"clid=\" +", "else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Print help text to channel \"\"\" def", "= [x.split(\"=\") for x in cmd.split() if len(x.split(\"=\")) > 1 and not x.__contains__(\"msg=ok\")]", "channel.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" % (channelLobby))) channelLobby = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby = channelLobby.split(\"\\\\n\")[1]", ": [i]Sets you as unready[/i]\\\\n\\\\n\" \\ \"[color=green]!help (!h)[/color] : [i]List all available commands[/i]\\\\n\"", "Stop the gather and move all players to lobby \"\"\" def cmd_stop(self, userid,", "to lobby \"\"\" def cmd_stop(self, userid, user, data): global gatherRunning global players p", "running![/color]\") \"\"\" Stop the gather and move all players to lobby \"\"\" def", "telnet and returns the telnet client \"\"\" def initBot(self): # Connect and log", "maps to play (default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready (!r, !gaben)[/color] : [i]Sets you as ready[/i]\\\\n\"", "self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players = [] broadcastMessage(\"[color=red]Gather has been stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No gather", "__version__ = '1.0.0' __maintainer__ = '<NAME>' __status__ = 'Production' import threading import telnetlib", "Fix encoding of strings \"\"\" def getenc(self, str): return str.encode('ascii') \"\"\" Broadcast message", 
"cmd = [x.split(\"=\") for x in cmd.split() if len(x.split(\"=\")) > 1 and not", "self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Change the amount of maps that will be", "broadcastMessage(\"[color=green]Game changed to %s![/color]\" % data) else: self.sendChannelMessage(\"[color=red]%s not supported![/color]\" % data) else:", "User commands \"!start\": self.cmd_start, \"!stop\": self.cmd_stop, \"!maps\": self.cmd_maps, \"!ready\": self.cmd_ready, \"!r\": self.cmd_ready, \"!gaben\":", "if gatherRunning: data = data.split(\"\\\\\\\\s\") global players p = None for x in", "3 server query! \"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright 2015, TS3GatherBot' __credits__", "admin, GTFO![/color]\") \"\"\" Fix encoding of strings \"\"\" def getenc(self, str): return str.encode('ascii')", "currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] : [i]Starts a gather[/i]\\\\n\" \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color]", "close all connections # before exiting thread self.closeBot() \"\"\" Connects to the teamspeak", "str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby = channelLobby.split(\"\\\\n\")[1] channelLobby = channelLobby.split()[0] return int(channelLobby.split(\"=\")[1]) \"\"\" Move user to", "python3 # -*- coding: utf-8 -*- \"\"\" Created by <NAME> This bot will", "return channel \"\"\" Print out a server message \"\"\" def sendServerMessage(self, msg): msg", "== '!activate': self.commands[cmdsp[0]](userid, d['invokername'], d['msg']) \"\"\" Start gather and set mod (the one", "x in players: if x.uid == userid: p = x if gatherRunning and", "\"!maps\": self.cmd_maps, \"!ready\": self.cmd_ready, \"!r\": self.cmd_ready, \"!gaben\": self.cmd_ready, \"!unready\": self.cmd_unready, \"!notready\": self.cmd_unready, \"!nr\":", "__maintainer__ = '<NAME>' __status__ = 'Production' import threading import telnetlib from Config import", "ex: # Print queue'd 
messages while not cmdToThread[self.ti].empty(): self.sendChannelMessage(cmdToThread[self.ti].get()) # Read commands from", "Change the amount of maps that will be played Only available to game", "not the game mod![/color]\" % data) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Sets", "gathers, all from your teamspeak 3 server. The bot requires access to the", "self.channel)) msg = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) # When the bot is closed,", "already running![/color]\") \"\"\" Stop the gather and move all players to lobby \"\"\"", "message \"\"\" def sendServerMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1", "\"!nr\": self.cmd_unready, \"!ur\": self.cmd_unready, \"!help\": self.cmd_help, \"!h\": self.cmd_help, # Admin commands \"!activate\": self.cmd_activate", "\"!unready\": self.cmd_unready, \"!notready\": self.cmd_unready, \"!nr\": self.cmd_unready, \"!ur\": self.cmd_unready, \"!help\": self.cmd_help, \"!h\": self.cmd_help, #", "execCommand(self, cmd): i1 = cmd.index(\"invokeruid\") i1 = cmd.index(\"=\", i1) i2 = cmd.index(\"\\\\n\", i1)", "def cmd_ready(self, userid, user, data): global gatherRunning if gatherRunning: global players alreadyReady =", "active if active: broadcastMessage(\"[color=green]GatherBot has been activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot has been deactivated[/color]\") else:", "Teamspeak 3 server query! 
\"\"\" __author__ = '<NAME>' __copyright__ = 'Copyright 2015, TS3GatherBot'", "BotThread(config[\"user1\"], config[\"pass1\"], config[\"g1\"], 1), BotThread(config[\"user2\"], config[\"pass2\"], config[\"g2\"], 2) ] for b in bots:", "available commands[/i]\\\\n\" if userid in admins.keys(): string += \"\\\\n\\\\n\" \\ \"[b]Admin Commands:[/b]\\\\n\" \\", "plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets a", "been started by %s![/color]\" % user) else: self.sendChannelMessage(\"[color=red]A gather is already running![/color]\") \"\"\"", "name of the channel \"\"\" def getChannelId(self, channel): channelLobby = channel.replace(\" \", \"\\s\")", "maps that will be played Only available to game mods! \"\"\" def cmd_maps(self,", "if p.uid == userid: if p.isMod: self.sendChannelMessage(\"[color=red]You can't leave your own gather. Use", "userid in admins.keys(): string += \"\\\\n\\\\n\" \\ \"[b]Admin Commands:[/b]\\\\n\" \\ \"[color=grey]!<cmd> (<aliases>) :", "Thread \"\"\" class BotThread(threading.Thread): def __init__(self, name, password, channel, index): super(BotThread, self).__init__() self.commands", "self.cmd_stop, \"!maps\": self.cmd_maps, \"!ready\": self.cmd_ready, \"!r\": self.cmd_ready, \"!gaben\": self.cmd_ready, \"!unready\": self.cmd_unready, \"!notready\": self.cmd_unready,", "to Lobby plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in players] plrs =", "else: players.remove(p) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Print help text to channel", "Get the channel ID from the name of the channel \"\"\" def getChannelId(self,", "commands \"!start\": self.cmd_start, \"!stop\": self.cmd_stop, \"!maps\": self.cmd_maps, \"!ready\": self.cmd_ready, \"!r\": self.cmd_ready, \"!gaben\": self.cmd_ready,", "the telnet client \"\"\" def initBot(self): # Connect and log in 
to server", "Use !stop to cancel it instead![/color]\") else: players.remove(p) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\")", "config, maps, admins, vetoprocesses from queue import Queue from Player import Player #", "len(players) == PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players are ready! Setting up teams![/color]\" % PLAYERS_NEEDED) l", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created by <NAME> This bot", "out a server message \"\"\" def sendServerMessage(self, msg): msg = msg.replace(\" \", \"\\s\")", "server. The bot requires access to the Teamspeak 3 server query! \"\"\" __author__", "in cmd: d[it[0]] = it[1] global active cmdsp = d['msg'].split(\"\\\\\\\\s\") if (cmdsp[0] in", "l[:int(PLAYERS_NEEDED/2)] team2 = l[int(PLAYERS_NEEDED/2):] plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in team1]", "been activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot has been deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're not an admin, GTFO![/color]\")", "self).__init__() self.commands = { # User commands \"!start\": self.cmd_start, \"!stop\": self.cmd_stop, \"!maps\": self.cmd_maps,", "log in to server telnet = telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host, telnet.port) telnet.write(self.getenc(\"login %s %s\\n\"", "else: self.sendChannelMessage(\"[color=red]You're already ready![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Set up teams,", "x if len(data) > 1 and p.isMod: data = data[1].lower() if data in", "whether the bot is activated Only available to admins! 
\"\"\" def cmd_activate(self, userid,", "<reponame>ikinz/TS3GatherBot #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created by <NAME> This", "While an exit command has not been issued ex = False while not", "in players: if p.uid == userid: alreadyReady = True if not alreadyReady: players.append(Player(user,", "channelLobby.split(\"\\\\n\")[1] channelLobby = channelLobby.split()[0] return int(channelLobby.split(\"=\")[1]) \"\"\" Move user to channel \"\"\" def", "commands from user and execute them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\" % self.channel)) msg =", "None for x in players: if x.uid == userid: p = x if", ": [i]Stops the gather[/i]\\\\n\\\\n\" \\ \"[color=green]!maps[/color] : [i]Set the amount of maps to", "Bot Thread \"\"\" class BotThread(threading.Thread): def __init__(self, name, password, channel, index): super(BotThread, self).__init__()", "self.telnet.write(self.getenc(\"channelfind pattern=%s\\n\" % (channelLobby))) channelLobby = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby = channelLobby.split(\"\\\\n\")[1] channelLobby = channelLobby.split()[0]", "+ str(self.getPlayerId(x.name)) for x in players] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" %", "user, data): global gatherRunning if gatherRunning: data = data.split(\"\\\\\\\\s\") global players p =", "play (default=bo3)[/i]\\\\n\" \\ \"[color=green]!ready (!r, !gaben)[/color] : [i]Sets you as ready[/i]\\\\n\" \\ \"[color=green]!unready", "= '<NAME>' __copyright__ = 'Copyright 2015, TS3GatherBot' __credits__ = ['<NAME>'] __licence__ = 'MIT'", ": [i]Stops the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] : [i]Set the amount of maps to play", "def sendServerMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1 msg=%s\\n\" %", "cmd: d[it[0]] = it[1] global active cmdsp = d['msg'].split(\"\\\\\\\\s\") if (cmdsp[0] in 
self.commands", "[i]Starts a gather[/i]\\\\n\" \"[color=green]!stop[/color] : [i]Stops the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] : [i]Set the amount", "issued ex = False while not ex: # Print queue'd messages while not", "self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs = [\"clid=\" + str(self.getPlayerId(x.name)) for x in team2] plrs =", "if active: broadcastMessage(\"[color=green]GatherBot has been activated[/color]\") else: broadcastMessage(\"[color=red]GatherBot has been deactivated[/color]\") else: self.sendChannelMessage(\"[color=red]You're", "getenc(self, str): return str.encode('ascii') \"\"\" Broadcast message to all bots \"\"\" def broadcastMessage(msg):", "access to the Teamspeak 3 server query! \"\"\" __author__ = '<NAME>' __copyright__ =", "commands sent by users \"\"\" def execCommand(self, cmd): i1 = cmd.index(\"invokeruid\") i1 =", "= 'MIT' __version__ = '1.0.0' __maintainer__ = '<NAME>' __status__ = 'Production' import threading", "global gatherRunning if not gatherRunning: gatherRunning = True global players players.append(Player(user, userid, True))", "# Create lists with all the bots and their Queues cmdToThread = [", "as unready[/i]\\\\n\\\\n\" \"[color=red]Please type !help for a full list of commands[/color]\" ) #", "Create lists with all the bots and their Queues cmdToThread = [ Queue(),", "i1 = cmd.index(\"=\", i1) i2 = cmd.index(\"\\\\n\", i1) userid = cmd[i1 + 1:i2]", "(channelLobby))) channelLobby = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby = channelLobby.split(\"\\\\n\")[1] channelLobby = channelLobby.split()[0] return int(channelLobby.split(\"=\")[1]) \"\"\"", "if x.uid == userid: p = x if len(data) > 1 and p.isMod:", "ready[/i]\\\\n\" \"[color=green]!unready[/color] : [i]Sets you as unready[/i]\\\\n\\\\n\" \"[color=red]Please type !help for a full", "data): global gatherRunning if gatherRunning: global players for p in players: if p.uid", "= [\"clid=\" + 
str(self.getPlayerId(x.name)) for x in team1] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s", "players if len(players) == PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players are ready! Setting up teams![/color]\" %", "def initBot(self): # Connect and log in to server telnet = telnetlib.Telnet(config[\"host\"], config[\"port\"])", "d['msg'].split(\"\\\\\\\\s\") if (cmdsp[0] in self.commands and active) or d['msg'] == '!activate': self.commands[cmdsp[0]](userid, d['invokername'],", "else: self.sendChannelMessage(\"[color=red]You didn't enter a value or you're not the game mod![/color]\" %", "and move all players to lobby \"\"\" def cmd_stop(self, userid, user, data): global", "global players for p in players: if p.uid == userid: if p.isMod: self.sendChannelMessage(\"[color=red]You", "for a full list of commands[/color]\" ) # While an exit command has", "players needed to start gather (even number please :)) PLAYERS_NEEDED = config[\"players\"] \"\"\"", "self.moveToChannel(self.getChannelId(self.channel)) # Print Welcome message self.sendChannelMessage( \"\\\\n[b]The GatherBot is currently running[/b]\\\\n\\\\n\" \"[color=green]!start[/color] :", "not ready \"\"\" def cmd_unready(self, userid, user, data): global gatherRunning if gatherRunning: global", "self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets a player as not", "channel \"\"\" def sendChannelMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2 msg=%s\\n\"", "coding: utf-8 -*- \"\"\" Created by <NAME> This bot will make it easier", "= channelLobby.split(\"\\\\n\")[1] channelLobby = channelLobby.split()[0] return int(channelLobby.split(\"=\")[1]) \"\"\" Move user to channel \"\"\"", "# Amount of players needed to start gather (even number please :)) PLAYERS_NEEDED", "a value or you're not the game 
mod![/color]\" % data) else: self.sendChannelMessage(\"[color=red]No gather", "message to a specific channel \"\"\" def sendChannelMessage(self, msg): msg = msg.replace(\" \",", "data): global gatherRunning if not gatherRunning: gatherRunning = True global players players.append(Player(user, userid,", "gatherRunning: global players alreadyReady = False for p in players: if p.uid ==", "players are ready! Setting up teams![/color]\" % PLAYERS_NEEDED) l = players[:] import random", "an exit command has not been issued ex = False while not ex:", "plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g1'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) plrs = [\"clid=\"", "Print help text to channel \"\"\" def cmd_help(self, userid, user, data): string =", "= telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host, telnet.port) telnet.write(self.getenc(\"login %s %s\\n\" % (self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\")) #", "or d['msg'] == '!activate': self.commands[cmdsp[0]](userid, d['invokername'], d['msg']) \"\"\" Start gather and set mod", "TS3GatherBot' __credits__ = ['<NAME>'] __licence__ = 'MIT' __version__ = '1.0.0' __maintainer__ = '<NAME>'", "to channel \"\"\" def moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\" % (self.botId, channel))) self.telnet.read_until(self.getenc(\"msg=ok\"))", "the name of the channel \"\"\" def getChannelId(self, channel): channelLobby = channel.replace(\" \",", "by <NAME> This bot will make it easier to set up gathers, all", "sendServerMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=3 target=1 msg=%s\\n\" % (msg)))", "players alreadyReady = False for p in players: if p.uid == userid: alreadyReady", "lists with all the bots and their Queues cmdToThread = [ Queue(), Queue(),", "bot is activated Only available to 
admins! \"\"\" def cmd_activate(self, userid, user, data):", "= str(self.telnet.read_until(self.getenc(\"msg=ok\"))) channelLobby = channelLobby.split(\"\\\\n\")[1] channelLobby = channelLobby.split()[0] return int(channelLobby.split(\"=\")[1]) \"\"\" Move user", "% data) else: self.sendChannelMessage(\"[color=red]%s not supported![/color]\" % data) else: self.sendChannelMessage(\"[color=red]You didn't enter a", "userid)) broadcastMessage(\"[color=green]%s is ready![/color]\" % user) self.start_gather() else: self.sendChannelMessage(\"[color=red]You're already ready![/color]\") else: self.sendChannelMessage(\"[color=red]No", "team2] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['g2'])))) self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Sets", "x.uid == userid: p = x if gatherRunning and p.isMod: gatherRunning = False", "def cmd_activate(self, userid, user, data): if userid in admins: global active active =", "x in players: if x.uid == userid: p = x if len(data) >", "[] broadcastMessage(\"[color=red]Gather has been stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Change the", "data[1].lower() if data in vetoprocesses: global vetoSystem vetoSystem = data broadcastMessage(\"[color=green]Game changed to", "= [] broadcastMessage(\"[color=red]Gather has been stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Change", "PLAYERS_NEEDED: broadcastMessage(\"[color=green]%s players are ready! 
Setting up teams![/color]\" % PLAYERS_NEEDED) l = players[:]", "and execute them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\" % self.channel)) msg = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"):", "Move user to channel \"\"\" def moveToChannel(self, channel): self.telnet.write(self.getenc(\"clientmove clid=%s cid=%s\\n\" % (self.botId,", "__author__ = '<NAME>' __copyright__ = 'Copyright 2015, TS3GatherBot' __credits__ = ['<NAME>'] __licence__ =", "self.cmd_unready, \"!ur\": self.cmd_unready, \"!help\": self.cmd_help, \"!h\": self.cmd_help, # Admin commands \"!activate\": self.cmd_activate }", "[i]Stops the gather[/i]\\\\n\\\\n\" \"[color=green]!maps[/color] : [i]Set the amount of maps to play (default=bo3)[/i]\\\\n\"", "you're not the game mod![/color]\" % data) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\"", "\"\"\" Init the app \"\"\" active = True players = [] gatherRunning =", "Connect and log in to server telnet = telnetlib.Telnet(config[\"host\"], config[\"port\"]) telnet.open(telnet.host, telnet.port) telnet.write(self.getenc(\"login", "players to correct channel and start veto process \"\"\" def start_gather(self): global players", "= x if len(data) > 1 and p.isMod: data = data[1].lower() if data", "from user and execute them self.telnet.write(self.getenc(\"servernotifyregister event=textchannel id=%s\\n\" % self.channel)) msg = str(self.telnet.read_until(self.getenc(\"msg=ok\")))", "len(data) > 1 and p.isMod: data = data[1].lower() if data in vetoprocesses: global", "else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\") \"\"\" Sets a user as ready \"\"\" def", "specific channel \"\"\" def sendChannelMessage(self, msg): msg = msg.replace(\" \", \"\\s\") self.telnet.write(self.getenc(\"sendtextmessage targetmode=2", "while not ex: # Print queue'd messages while not cmdToThread[self.ti].empty(): 
self.sendChannelMessage(cmdToThread[self.ti].get()) # Read", "enter a value or you're not the game mod![/color]\" % data) else: self.sendChannelMessage(\"[color=red]No", "str(self.telnet.read_until(self.getenc(\"msg=ok\"))) if msg.__contains__(\"notifytextmessage\"): self.execCommand(msg) # When the bot is closed, close all connections", "self.telnet.read_until(self.getenc(\"msg=ok\")) \"\"\" Parse and execute commands sent by users \"\"\" def execCommand(self, cmd):", "is ready![/color]\" % user) self.start_gather() else: self.sendChannelMessage(\"[color=red]You're already ready![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently", "it[1] global active cmdsp = d['msg'].split(\"\\\\\\\\s\") if (cmdsp[0] in self.commands and active) or", "cmd_unready(self, userid, user, data): global gatherRunning if gatherRunning: global players for p in", "# -*- coding: utf-8 -*- \"\"\" Created by <NAME> This bot will make", "for x in players] plrs = \"|\".join(plrs) self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['gl']))))", "3) players = [] broadcastMessage(\"[color=red]Gather has been stopped![/color]\") else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\")", "self.sendChannelMessage(\"[color=red]A gather is already running![/color]\") \"\"\" Stop the gather and move all players", ":)) PLAYERS_NEEDED = config[\"players\"] \"\"\" Bot Thread \"\"\" class BotThread(threading.Thread): def __init__(self, name,", "(self.name, self.password))) telnet.read_until(self.getenc(\"msg=ok\")) # Select virtual server id telnet.write(self.getenc(\"use sid=%s\\n\" % (config[\"sid\"]))) telnet.read_until(self.getenc(\"msg=ok\"))", "self.telnet.write(self.getenc(\"clientmove %s cid=%s\\n\" % (plrs, self.getChannelId(config['gl'])))) self.telnet.read_until(self.getenc(\"msg=ok\"), 3) players = [] broadcastMessage(\"[color=red]Gather has", "Toggle whether the bot is activated Only available to admins! 
\"\"\" def cmd_activate(self,", "[i]List all available commands[/i]\\\\n\" if userid in admins.keys(): string += \"\\\\n\\\\n\" \\ \"[b]Admin", "broadcastMessage(\"[color=green]%s is ready![/color]\" % user) self.start_gather() else: self.sendChannelMessage(\"[color=red]You're already ready![/color]\") else: self.sendChannelMessage(\"[color=red]No gather", "= 'Production' import threading import telnetlib from Config import config, maps, admins, vetoprocesses", "start gather (even number please :)) PLAYERS_NEEDED = config[\"players\"] \"\"\" Bot Thread \"\"\"", "self.sendChannelMessage(\"[color=red]You're not an admin, GTFO![/color]\") \"\"\" Fix encoding of strings \"\"\" def getenc(self,", "data = data[1].lower() if data in vetoprocesses: global vetoSystem vetoSystem = data broadcastMessage(\"[color=green]Game", "from the name of the channel \"\"\" def getChannelId(self, channel): channelLobby = channel.replace(\"", "client ID for this bot \"\"\" def getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" % name))", "or you're not the game mod![/color]\" % data) else: self.sendChannelMessage(\"[color=red]No gather currently running![/color]\")", "Amount of players needed to start gather (even number please :)) PLAYERS_NEEDED =", "userid: alreadyReady = True if not alreadyReady: players.append(Player(user, userid)) broadcastMessage(\"[color=green]%s is ready![/color]\" %", "can't leave your own gather. Use !stop to cancel it instead![/color]\") else: players.remove(p)", "Sets a player as not ready \"\"\" def cmd_unready(self, userid, user, data): global", "name)) botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr = botstr.split()[0] return int(botstr.split(\"=\")[1]) \"\"\" Get the channel", "getPlayerId(self, name): self.telnet.write(self.getenc(\"clientfind pattern=%s\\n\" % name)) botstr = str(self.telnet.read_until(self.getenc(\"msg=ok\"))) botstr = botstr.split()[0] return", "game mods! 
\"\"\" def cmd_maps(self, userid, user, data): global gatherRunning if gatherRunning: data", "True)) broadcastMessage(\"[color=green]A gather has been started by %s![/color]\" % user) else: self.sendChannelMessage(\"[color=red]A gather" ]
[ "Yellow circle for the head # Two black circle eyes # Red rectangle", "simple smiley face # Yellow circle for the head # Two black circle", "drawing a simple smiley face # Yellow circle for the head # Two", "pygame.draw.rect(screen, color, (x, y, width, height), thickness) # pygame.draw.rect(screen, (100, 0, 0), (240,", "20) pygame.draw.circle(screen, (0, 0, 0), (400, 200), 20) # draws the nose pygame.draw.circle(screen,", "35) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 35, 2) # draws the mouth", "color, (x, y, width, height), thickness) # pygame.draw.rect(screen, (100, 0, 0), (240, 350,", "black circle eyes # Red rectangle (rect) mouth # Red circle nose. import", "pygame import sys pygame.init() screen = pygame.display.set_mode((600, 600)) while True: for event in", "(0, 0, 0), (400, 200), 20) # draws the nose pygame.draw.circle(screen, (255, 0,", "600)) while True: for event in pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN: print(event.pos) if", "# draws the mouth pygame.draw.rect(screen, (127, 0, 0), (200, 400, 200, 25)) #", "(255,255,0), (300,300), 250) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 250, 5) # draws", "circle nose. import pygame import sys pygame.init() screen = pygame.display.set_mode((600, 600)) while True:", "# Two black circle eyes # Red rectangle (rect) mouth # Red circle", "face # Yellow circle for the head # Two black circle eyes #", "the yellow head pygame.draw.circle(screen, (255,255,0), (300,300), 250) pygame.draw.circle(screen, (0, 0, 0), (300, 300),", "0, 0), (300, 300), 35, 2) # draws the mouth pygame.draw.rect(screen, (127, 0,", "Red circle nose. 
import pygame import sys pygame.init() screen = pygame.display.set_mode((600, 600)) while", "a simple smiley face # Yellow circle for the head # Two black", "(400, 200), 20) # draws the nose pygame.draw.circle(screen, (255, 0, 0), (300, 300),", "# Yellow circle for the head # Two black circle eyes # Red", "In this module we'll start drawing a simple smiley face # Yellow circle", "in pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN: print(event.pos) if event.type == pygame.QUIT: sys.exit() screen.fill((0,", "this module we'll start drawing a simple smiley face # Yellow circle for", "pygame.draw.circle(screen, (0, 0, 0), (205, 200), 20) pygame.draw.circle(screen, (0, 0, 0), (400, 200),", "(rect) mouth # Red circle nose. import pygame import sys pygame.init() screen =", "0), (300, 300), 250, 5) # draws the eyes pygame.draw.circle(screen, (0, 0, 0),", "20) # draws the nose pygame.draw.circle(screen, (255, 0, 0), (300, 300), 35) pygame.draw.circle(screen,", "pygame.draw.rect(screen, (127, 0, 0), (200, 400, 200, 25)) # pygame.draw.rect(screen, color, (x, y,", "Red rectangle (rect) mouth # Red circle nose. 
import pygame import sys pygame.init()", "the head # Two black circle eyes # Red rectangle (rect) mouth #", "(x, y, width, height), thickness) # pygame.draw.rect(screen, (100, 0, 0), (240, 350, 160,", "pygame.MOUSEBUTTONDOWN: print(event.pos) if event.type == pygame.QUIT: sys.exit() screen.fill((0, 200, 200)) # Draws the", "Draws the yellow head pygame.draw.circle(screen, (255,255,0), (300,300), 250) pygame.draw.circle(screen, (0, 0, 0), (300,", "draws the eyes pygame.draw.circle(screen, (0, 0, 0), (205, 200), 20) pygame.draw.circle(screen, (0, 0,", "250, 5) # draws the eyes pygame.draw.circle(screen, (0, 0, 0), (205, 200), 20)", "(300, 300), 35, 2) # draws the mouth pygame.draw.rect(screen, (127, 0, 0), (200,", "2) # draws the mouth pygame.draw.rect(screen, (127, 0, 0), (200, 400, 200, 25))", "(127, 0, 0), (200, 400, 200, 25)) # pygame.draw.rect(screen, color, (x, y, width,", "# draws the eyes pygame.draw.circle(screen, (0, 0, 0), (205, 200), 20) pygame.draw.circle(screen, (0,", "0), (300, 300), 35, 2) # draws the mouth pygame.draw.rect(screen, (127, 0, 0),", "0, 0), (205, 200), 20) pygame.draw.circle(screen, (0, 0, 0), (400, 200), 20) #", "# Red circle nose. 
import pygame import sys pygame.init() screen = pygame.display.set_mode((600, 600))", "0, 0), (400, 200), 20) # draws the nose pygame.draw.circle(screen, (255, 0, 0),", "5) # draws the eyes pygame.draw.circle(screen, (0, 0, 0), (205, 200), 20) pygame.draw.circle(screen,", "yellow head pygame.draw.circle(screen, (255,255,0), (300,300), 250) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 250,", "the nose pygame.draw.circle(screen, (255, 0, 0), (300, 300), 35) pygame.draw.circle(screen, (0, 0, 0),", "200)) # Draws the yellow head pygame.draw.circle(screen, (255,255,0), (300,300), 250) pygame.draw.circle(screen, (0, 0,", "(300, 300), 250, 5) # draws the eyes pygame.draw.circle(screen, (0, 0, 0), (205,", "0), (200, 400, 200, 25)) # pygame.draw.rect(screen, color, (x, y, width, height), thickness)", "import pygame import sys pygame.init() screen = pygame.display.set_mode((600, 600)) while True: for event", "(0, 0, 0), (205, 200), 20) pygame.draw.circle(screen, (0, 0, 0), (400, 200), 20)", "Two black circle eyes # Red rectangle (rect) mouth # Red circle nose.", "0), (300, 300), 35) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 35, 2) #", "while True: for event in pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN: print(event.pos) if event.type", "sys.exit() screen.fill((0, 200, 200)) # Draws the yellow head pygame.draw.circle(screen, (255,255,0), (300,300), 250)", "import sys pygame.init() screen = pygame.display.set_mode((600, 600)) while True: for event in pygame.event.get():", "250) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 250, 5) # draws the eyes", "0), (205, 200), 20) pygame.draw.circle(screen, (0, 0, 0), (400, 200), 20) # draws", "200, 25)) # pygame.draw.rect(screen, color, (x, y, width, height), thickness) # pygame.draw.rect(screen, (100,", "pygame.draw.circle(screen, (255, 0, 0), (300, 300), 35) pygame.draw.circle(screen, (0, 0, 0), (300, 300),", "draws the nose pygame.draw.circle(screen, (255, 0, 0), (300, 300), 35) 
pygame.draw.circle(screen, (0, 0,", "module we'll start drawing a simple smiley face # Yellow circle for the", "300), 250, 5) # draws the eyes pygame.draw.circle(screen, (0, 0, 0), (205, 200),", "TODO: In this module we'll start drawing a simple smiley face # Yellow", "200, 200)) # Draws the yellow head pygame.draw.circle(screen, (255,255,0), (300,300), 250) pygame.draw.circle(screen, (0,", "if event.type == pygame.QUIT: sys.exit() screen.fill((0, 200, 200)) # Draws the yellow head", "pygame.draw.circle(screen, (0, 0, 0), (300, 300), 250, 5) # draws the eyes pygame.draw.circle(screen,", "300), 35) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 35, 2) # draws the", "0, 0), (300, 300), 35) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 35, 2)", "(0, 0, 0), (300, 300), 35, 2) # draws the mouth pygame.draw.rect(screen, (127,", "circle eyes # Red rectangle (rect) mouth # Red circle nose. import pygame", "# TODO: In this module we'll start drawing a simple smiley face #", "head # Two black circle eyes # Red rectangle (rect) mouth # Red", "= pygame.display.set_mode((600, 600)) while True: for event in pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN:", "# pygame.draw.rect(screen, color, (x, y, width, height), thickness) # pygame.draw.rect(screen, (100, 0, 0),", "sys pygame.init() screen = pygame.display.set_mode((600, 600)) while True: for event in pygame.event.get(): if", "(300, 300), 35) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 35, 2) # draws", "True: for event in pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN: print(event.pos) if event.type ==", "nose. 
import pygame import sys pygame.init() screen = pygame.display.set_mode((600, 600)) while True: for", "pygame.display.set_mode((600, 600)) while True: for event in pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN: print(event.pos)", "# Draws the yellow head pygame.draw.circle(screen, (255,255,0), (300,300), 250) pygame.draw.circle(screen, (0, 0, 0),", "event.type == pygame.QUIT: sys.exit() screen.fill((0, 200, 200)) # Draws the yellow head pygame.draw.circle(screen,", "mouth pygame.draw.rect(screen, (127, 0, 0), (200, 400, 200, 25)) # pygame.draw.rect(screen, color, (x,", "pygame.draw.circle(screen, (255,255,0), (300,300), 250) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 250, 5) #", "print(event.pos) if event.type == pygame.QUIT: sys.exit() screen.fill((0, 200, 200)) # Draws the yellow", "400, 200, 25)) # pygame.draw.rect(screen, color, (x, y, width, height), thickness) # pygame.draw.rect(screen,", "screen.fill((0, 200, 200)) # Draws the yellow head pygame.draw.circle(screen, (255,255,0), (300,300), 250) pygame.draw.circle(screen,", "pygame.QUIT: sys.exit() screen.fill((0, 200, 200)) # Draws the yellow head pygame.draw.circle(screen, (255,255,0), (300,300),", "smiley face # Yellow circle for the head # Two black circle eyes", "== pygame.MOUSEBUTTONDOWN: print(event.pos) if event.type == pygame.QUIT: sys.exit() screen.fill((0, 200, 200)) # Draws", "head pygame.draw.circle(screen, (255,255,0), (300,300), 250) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 250, 5)", "if event.type == pygame.MOUSEBUTTONDOWN: print(event.pos) if event.type == pygame.QUIT: sys.exit() screen.fill((0, 200, 200))", "event in pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN: print(event.pos) if event.type == pygame.QUIT: sys.exit()", "width, height), thickness) # pygame.draw.rect(screen, (100, 0, 0), (240, 350, 160, 30)) pygame.display.update()", "for the head # Two black circle eyes # Red rectangle (rect) mouth", "for event in pygame.event.get(): if event.type 
== pygame.MOUSEBUTTONDOWN: print(event.pos) if event.type == pygame.QUIT:", "eyes # Red rectangle (rect) mouth # Red circle nose. import pygame import", "0), (400, 200), 20) # draws the nose pygame.draw.circle(screen, (255, 0, 0), (300,", "(200, 400, 200, 25)) # pygame.draw.rect(screen, color, (x, y, width, height), thickness) #", "rectangle (rect) mouth # Red circle nose. import pygame import sys pygame.init() screen", "# draws the nose pygame.draw.circle(screen, (255, 0, 0), (300, 300), 35) pygame.draw.circle(screen, (0,", "0, 0), (300, 300), 250, 5) # draws the eyes pygame.draw.circle(screen, (0, 0,", "# Red rectangle (rect) mouth # Red circle nose. import pygame import sys", "(0, 0, 0), (300, 300), 250, 5) # draws the eyes pygame.draw.circle(screen, (0,", "nose pygame.draw.circle(screen, (255, 0, 0), (300, 300), 35) pygame.draw.circle(screen, (0, 0, 0), (300,", "(205, 200), 20) pygame.draw.circle(screen, (0, 0, 0), (400, 200), 20) # draws the", "(255, 0, 0), (300, 300), 35) pygame.draw.circle(screen, (0, 0, 0), (300, 300), 35,", "25)) # pygame.draw.rect(screen, color, (x, y, width, height), thickness) # pygame.draw.rect(screen, (100, 0,", "pygame.draw.circle(screen, (0, 0, 0), (300, 300), 35, 2) # draws the mouth pygame.draw.rect(screen,", "mouth # Red circle nose. 
import pygame import sys pygame.init() screen = pygame.display.set_mode((600,", "200), 20) pygame.draw.circle(screen, (0, 0, 0), (400, 200), 20) # draws the nose", "== pygame.QUIT: sys.exit() screen.fill((0, 200, 200)) # Draws the yellow head pygame.draw.circle(screen, (255,255,0),", "pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN: print(event.pos) if event.type == pygame.QUIT: sys.exit() screen.fill((0, 200,", "event.type == pygame.MOUSEBUTTONDOWN: print(event.pos) if event.type == pygame.QUIT: sys.exit() screen.fill((0, 200, 200)) #", "the mouth pygame.draw.rect(screen, (127, 0, 0), (200, 400, 200, 25)) # pygame.draw.rect(screen, color,", "the eyes pygame.draw.circle(screen, (0, 0, 0), (205, 200), 20) pygame.draw.circle(screen, (0, 0, 0),", "screen = pygame.display.set_mode((600, 600)) while True: for event in pygame.event.get(): if event.type ==", "eyes pygame.draw.circle(screen, (0, 0, 0), (205, 200), 20) pygame.draw.circle(screen, (0, 0, 0), (400,", "0, 0), (200, 400, 200, 25)) # pygame.draw.rect(screen, color, (x, y, width, height),", "35, 2) # draws the mouth pygame.draw.rect(screen, (127, 0, 0), (200, 400, 200,", "circle for the head # Two black circle eyes # Red rectangle (rect)", "300), 35, 2) # draws the mouth pygame.draw.rect(screen, (127, 0, 0), (200, 400,", "start drawing a simple smiley face # Yellow circle for the head #", "y, width, height), thickness) # pygame.draw.rect(screen, (100, 0, 0), (240, 350, 160, 30))", "pygame.init() screen = pygame.display.set_mode((600, 600)) while True: for event in pygame.event.get(): if event.type", "draws the mouth pygame.draw.rect(screen, (127, 0, 0), (200, 400, 200, 25)) # pygame.draw.rect(screen,", "200), 20) # draws the nose pygame.draw.circle(screen, (255, 0, 0), (300, 300), 35)", "pygame.draw.circle(screen, (0, 0, 0), (400, 200), 20) # draws the nose pygame.draw.circle(screen, (255,", "we'll start drawing a simple smiley face # Yellow circle for the head", "(300,300), 250) 
pygame.draw.circle(screen, (0, 0, 0), (300, 300), 250, 5) # draws the" ]
[ "if output_diff: msg = \"The two sequential reads should produce either equal set", "source_acceptance_test.base import BaseTest from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20 * 60) class", "source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20 * 60) class TestFullRefresh(BaseTest): def test_sequential_reads(self, connector_config,", "import Type from source_acceptance_test.base import BaseTest from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20", "Type from source_acceptance_test.base import BaseTest from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20 *", "records or one of them is a strict subset of the other\" detailed_logger.info(msg)", "in output if message.type == Type.RECORD] output_diff = set(map(serialize, records_1)) - set(map(serialize, records_2))", "TestFullRefresh(BaseTest): def test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger): configured_catalog = full_refresh_only_catalog(configured_catalog) output =", "message.type == Type.RECORD] output_diff = set(map(serialize, records_1)) - set(map(serialize, records_2)) if output_diff: msg", "output if message.type == Type.RECORD] output = docker_runner.call_read(connector_config, configured_catalog) records_2 = [message.record.data for", "set of records or one of them is a strict subset of the", "60) class TestFullRefresh(BaseTest): def test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger): configured_catalog = full_refresh_only_catalog(configured_catalog)", "full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20 * 60) class TestFullRefresh(BaseTest): def 
test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner,", "configured_catalog) records_1 = [message.record.data for message in output if message.type == Type.RECORD] output", "reads should produce either equal set of records or one of them is", "configured_catalog) records_2 = [message.record.data for message in output if message.type == Type.RECORD] output_diff", "msg = \"The two sequential reads should produce either equal set of records", "2021 Airbyte, Inc., all rights reserved. # import pytest from airbyte_cdk.models import Type", "all rights reserved. # import pytest from airbyte_cdk.models import Type from source_acceptance_test.base import", "reserved. # import pytest from airbyte_cdk.models import Type from source_acceptance_test.base import BaseTest from", "produce either equal set of records or one of them is a strict", "docker_runner: ConnectorRunner, detailed_logger): configured_catalog = full_refresh_only_catalog(configured_catalog) output = docker_runner.call_read(connector_config, configured_catalog) records_1 = [message.record.data", "of records or one of them is a strict subset of the other\"", "Type.RECORD] output_diff = set(map(serialize, records_1)) - set(map(serialize, records_2)) if output_diff: msg = \"The", "either equal set of records or one of them is a strict subset", "ConnectorRunner, detailed_logger): configured_catalog = full_refresh_only_catalog(configured_catalog) output = docker_runner.call_read(connector_config, configured_catalog) records_1 = [message.record.data for", "equal set of records or one of them is a strict subset of", "import pytest from airbyte_cdk.models import Type from source_acceptance_test.base import BaseTest from source_acceptance_test.utils import", "# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. 
# import pytest", "configured_catalog = full_refresh_only_catalog(configured_catalog) output = docker_runner.call_read(connector_config, configured_catalog) records_1 = [message.record.data for message in", "output_diff: msg = \"The two sequential reads should produce either equal set of", "def test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger): configured_catalog = full_refresh_only_catalog(configured_catalog) output = docker_runner.call_read(connector_config,", "= [message.record.data for message in output if message.type == Type.RECORD] output = docker_runner.call_read(connector_config,", "= set(map(serialize, records_1)) - set(map(serialize, records_2)) if output_diff: msg = \"The two sequential", "= docker_runner.call_read(connector_config, configured_catalog) records_1 = [message.record.data for message in output if message.type ==", "output = docker_runner.call_read(connector_config, configured_catalog) records_2 = [message.record.data for message in output if message.type", "for message in output if message.type == Type.RECORD] output_diff = set(map(serialize, records_1)) -", "Airbyte, Inc., all rights reserved. 
# import pytest from airbyte_cdk.models import Type from", "set(map(serialize, records_1)) - set(map(serialize, records_2)) if output_diff: msg = \"The two sequential reads", "ConnectorRunner, full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20 * 60) class TestFullRefresh(BaseTest): def test_sequential_reads(self, connector_config, configured_catalog, docker_runner:", "BaseTest from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20 * 60) class TestFullRefresh(BaseTest): def", "= [message.record.data for message in output if message.type == Type.RECORD] output_diff = set(map(serialize,", "if message.type == Type.RECORD] output_diff = set(map(serialize, records_1)) - set(map(serialize, records_2)) if output_diff:", "configured_catalog, docker_runner: ConnectorRunner, detailed_logger): configured_catalog = full_refresh_only_catalog(configured_catalog) output = docker_runner.call_read(connector_config, configured_catalog) records_1 =", "set(map(serialize, records_2)) if output_diff: msg = \"The two sequential reads should produce either", "or one of them is a strict subset of the other\" detailed_logger.info(msg) detailed_logger.log_json_list(output_diff)", "== Type.RECORD] output_diff = set(map(serialize, records_1)) - set(map(serialize, records_2)) if output_diff: msg =", "- set(map(serialize, records_2)) if output_diff: msg = \"The two sequential reads should produce", "connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger): configured_catalog = full_refresh_only_catalog(configured_catalog) output = docker_runner.call_read(connector_config, configured_catalog) records_1", "@pytest.mark.default_timeout(20 * 60) class TestFullRefresh(BaseTest): def test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger): configured_catalog", "from airbyte_cdk.models import Type from 
source_acceptance_test.base import BaseTest from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog,", "class TestFullRefresh(BaseTest): def test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger): configured_catalog = full_refresh_only_catalog(configured_catalog) output", "in output if message.type == Type.RECORD] output = docker_runner.call_read(connector_config, configured_catalog) records_2 = [message.record.data", "two sequential reads should produce either equal set of records or one of", "from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20 * 60) class TestFullRefresh(BaseTest): def test_sequential_reads(self,", "* 60) class TestFullRefresh(BaseTest): def test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger): configured_catalog =", "= full_refresh_only_catalog(configured_catalog) output = docker_runner.call_read(connector_config, configured_catalog) records_1 = [message.record.data for message in output", "airbyte_cdk.models import Type from source_acceptance_test.base import BaseTest from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize", "records_1)) - set(map(serialize, records_2)) if output_diff: msg = \"The two sequential reads should", "full_refresh_only_catalog(configured_catalog) output = docker_runner.call_read(connector_config, configured_catalog) records_1 = [message.record.data for message in output if", "serialize @pytest.mark.default_timeout(20 * 60) class TestFullRefresh(BaseTest): def test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger):", "message in output if message.type == Type.RECORD] output_diff = set(map(serialize, records_1)) - set(map(serialize,", "Copyright (c) 2021 Airbyte, Inc., all rights reserved. 
# import pytest from airbyte_cdk.models", "= \"The two sequential reads should produce either equal set of records or", "import ConnectorRunner, full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20 * 60) class TestFullRefresh(BaseTest): def test_sequential_reads(self, connector_config, configured_catalog,", "records_1 = [message.record.data for message in output if message.type == Type.RECORD] output =", "# Copyright (c) 2021 Airbyte, Inc., all rights reserved. # import pytest from", "\"The two sequential reads should produce either equal set of records or one", "Inc., all rights reserved. # import pytest from airbyte_cdk.models import Type from source_acceptance_test.base", "(c) 2021 Airbyte, Inc., all rights reserved. # import pytest from airbyte_cdk.models import", "[message.record.data for message in output if message.type == Type.RECORD] output_diff = set(map(serialize, records_1))", "one of them is a strict subset of the other\" detailed_logger.info(msg) detailed_logger.log_json_list(output_diff) pytest.fail(msg)", "records_2 = [message.record.data for message in output if message.type == Type.RECORD] output_diff =", "docker_runner.call_read(connector_config, configured_catalog) records_1 = [message.record.data for message in output if message.type == Type.RECORD]", "for message in output if message.type == Type.RECORD] output = docker_runner.call_read(connector_config, configured_catalog) records_2", "from source_acceptance_test.base import BaseTest from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20 * 60)", "message.type == Type.RECORD] output = docker_runner.call_read(connector_config, configured_catalog) records_2 = [message.record.data for message in", "[message.record.data for message in output if message.type == Type.RECORD] output = docker_runner.call_read(connector_config, configured_catalog)", "message in output if message.type == Type.RECORD] output = 
docker_runner.call_read(connector_config, configured_catalog) records_2 =", "output if message.type == Type.RECORD] output_diff = set(map(serialize, records_1)) - set(map(serialize, records_2)) if", "if message.type == Type.RECORD] output = docker_runner.call_read(connector_config, configured_catalog) records_2 = [message.record.data for message", "output_diff = set(map(serialize, records_1)) - set(map(serialize, records_2)) if output_diff: msg = \"The two", "import BaseTest from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize @pytest.mark.default_timeout(20 * 60) class TestFullRefresh(BaseTest):", "Type.RECORD] output = docker_runner.call_read(connector_config, configured_catalog) records_2 = [message.record.data for message in output if", "records_2)) if output_diff: msg = \"The two sequential reads should produce either equal", "rights reserved. # import pytest from airbyte_cdk.models import Type from source_acceptance_test.base import BaseTest", "test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger): configured_catalog = full_refresh_only_catalog(configured_catalog) output = docker_runner.call_read(connector_config, configured_catalog)", "docker_runner.call_read(connector_config, configured_catalog) records_2 = [message.record.data for message in output if message.type == Type.RECORD]", "sequential reads should produce either equal set of records or one of them", "<filename>airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/tests/test_full_refresh.py # # Copyright (c) 2021 Airbyte, Inc., all rights reserved. 
# import", "pytest from airbyte_cdk.models import Type from source_acceptance_test.base import BaseTest from source_acceptance_test.utils import ConnectorRunner,", "== Type.RECORD] output = docker_runner.call_read(connector_config, configured_catalog) records_2 = [message.record.data for message in output", "output = docker_runner.call_read(connector_config, configured_catalog) records_1 = [message.record.data for message in output if message.type", "# import pytest from airbyte_cdk.models import Type from source_acceptance_test.base import BaseTest from source_acceptance_test.utils", "should produce either equal set of records or one of them is a", "= docker_runner.call_read(connector_config, configured_catalog) records_2 = [message.record.data for message in output if message.type ==", "detailed_logger): configured_catalog = full_refresh_only_catalog(configured_catalog) output = docker_runner.call_read(connector_config, configured_catalog) records_1 = [message.record.data for message" ]
[ "'456' self.phone_invalid_TYPE = 'asdaaaaaads' self.phone_invalid_MAX = '456134564898761' self.email_valid = '<EMAIL>' self.email_invalid = 'admin.com'", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self): form_data = {'name': self.name_invalid_MIN, 'phone': self.phone_valid,", "'a@a.a' self.email_invalid_MAX = '<EMAIL>' self.email_invalid_BASE = '<EMAIL>' self.password_valid = '<PASSWORD>' self.password_invalid = '<PASSWORD>'", "self.CPF_document_invalid_MAX, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self):", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "'UF': self.UF_invalid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self): form_data = {'name':", "test_forms_patient_password_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 
'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.phone_invalid = '456' self.phone_invalid_MIN = '456' self.phone_invalid_TYPE = 'asdaaaaaads' self.phone_invalid_MAX = '456134564898761' self.email_valid", "TestPatientForm(TestCase): def setUp(self): self.name_valid = '<NAME>' self.name_invalid = 'a12' self.name_invalid_TYPE = 'a@hjasgdjasd1al' self.name_invalid_MAX", "setUp(self): self.name_valid = '<NAME>' self.name_invalid = 'a12' self.name_invalid_TYPE = 'a@hjasgdjasd1al' self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd'", "\"<EMAIL>\" user.save() def test_forms_patient_is_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth':", "'<EMAIL>' self.email_invalid_BASE = '<EMAIL>' self.password_valid = '<PASSWORD>' self.password_invalid = '<PASSWORD>' self.password_invalid_MAX = '<PASSWORD>'", "'<PASSWORD>' self.password_invalid_MAX = '<PASSWORD>' self.password_invalid_MIN = '<PASSWORD>' self.password_invalid_TYPE = '<PASSWORD>!' 
self.date_of_birth_valid = '10/12/1990'", "user = User() user.email = \"<EMAIL>\" user.save() def test_forms_patient_is_valid(self): form_data = {'name': self.name_valid,", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data = {'name':", "{'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_TYPE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "'password': self.password_valid, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid, 'UF':", "self.email_invalid_BASE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_sex_is_not_valid(self): form_data = {'name':", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': self.<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "User class TestPatientForm(TestCase): def setUp(self): self.name_valid = '<NAME>' self.name_invalid = 'a12' self.name_invalid_TYPE 
=", "def test_forms_patient_CEP_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.email_invalid_MIN, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "'11111111111' self.CPF_document_invalid_TYPE = '252627282930asdf' self.CEP_valid = 72850735 self.CEP_invalid = '7285073A' self.CEP_invalid_MIN = 42", "user.email = \"<EMAIL>\" user.save() def test_forms_patient_is_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self): form_data = {'name': self.name_invalid_MAX, 'phone': self.phone_valid, 'email': self.email_valid,", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid,", "def test_forms_patient_name_is_not_valid_MAX(self): form_data = {'name': self.name_invalid_MAX, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "= PatientForm(data=form_data) self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self): form_data = {'name': self.name_invalid_TYPE, 'phone': self.phone_valid, 'email': self.email_valid,", "def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.date_of_birth_valid, 
'CEP': self.CEP_invalid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form =", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'CEP': self.CEP_valid, 'UF': self.UF_invalid_MAX, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data)", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form", "= '10/12/1990' self.date_of_birth_invalid = '18' self.date_of_birth_invalid_FORMAT = '18' self.date_of_birth_invalid_MIN = '10/12/2020' self.sex_valid =", "'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN = 'a' self.phone_valid = '1234567890' self.phone_invalid = '456' self.phone_invalid_MIN = '456'", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_sex_is_not_valid(self): form_data", "self.CPF_document_invalid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "self.CEP_valid, 'UF': self.UF_invalid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def 
test_forms_patient_phone_is_not_valid_MIN(self):", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_sex_is_not_valid(self): form_data = {'name': self.name_valid,", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid(self): form_data = {'name': self.name_valid,", "self.UF_valid, 'city': self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid(self):", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self): form_data =", "'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': self.<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "= '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user = User() user.email = \"<EMAIL>\" user.save() def test_forms_patient_is_valid(self):", "self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid}", "self.complement_valid} form = PatientForm(data=form_data) 
self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone':", "def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self): form_data = {'name': self.name_valid,", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "'DF' self.UF_invalid = '' self.UF_invalid_MIN = 'A' self.UF_invalid_MAX = 'AAA' self.city_valid = 'Bras", "test_forms_patient_name_is_not_valid_MIN(self): form_data = {'name': self.name_invalid_MIN, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': self.<PASSWORD>,", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MAX, 'email': self.email_valid, 'password':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data =", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def 
test_forms_patient_email_is_not_valid_TYPE(self): form_data", "def test_forms_patient_email_is_not_valid_BASE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_BASE, 'password': <PASSWORD>, 'confirm_password':", "self.sex_invalid = 'A' self.CPF_document_valid = '61367541000' self.CPF_document_invalid = '11111111111' self.CPF_document_invalid_MIN = '111111111' self.CPF_document_invalid_MAX", "'city': self.city_invalid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self): form_data", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN, 'sex': self.sex_valid,", "self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_invalid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self):", "= 'a@hjasgdjasd1al' self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN = 'a' self.phone_valid = '1234567890' self.phone_invalid =", "self.email_valid, 'password': <PASSWORD>, 'confirm_password': self.<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "self.UF_invalid_MAX, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def 
test_forms_patient_city_is_not_valid(self):", "self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid_MAX, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form =", "'CEP': self.CEP_invalid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data)", "'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data)", "'11111111111' self.CPF_document_invalid_MIN = '111111111' self.CPF_document_invalid_MAX = '11111111111' self.CPF_document_invalid_TYPE = '252627282930asdf' self.CEP_valid = 72850735", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self): form_data = {'name':", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self):", "'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def", "'password': self.<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF':", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def 
test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone':", "<PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid,", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self): form_data", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self): form_data", "= 72850735 self.CEP_invalid = '7285073A' self.CEP_invalid_MIN = 42 self.CEP_invalid_MAX = 728507351 self.UF_valid =", "PatientForm from user.models import User class TestPatientForm(TestCase): def setUp(self): self.name_valid = '<NAME>' self.name_invalid", "'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data)", "self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN = 'a' self.phone_valid = '1234567890' self.phone_invalid = '456' self.phone_invalid_MIN", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self): form_data =", "self.name_valid, 'phone': self.phone_invalid_MIN, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid, 'UF': 
self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement':", "self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "{'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN, 'sex':", "= '<PASSWORD>' self.password_invalid_MAX = '<PASSWORD>' self.password_invalid_MIN = '<PASSWORD>' self.password_invalid_TYPE = '<PASSWORD>!' self.date_of_birth_valid =", "self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user = User() user.email = \"<EMAIL>\" user.save() def", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self): form_data", "= {'name': self.name_valid, 'phone': self.phone_invalid_MAX, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "15' self.complement_invalid = '' self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user = User() user.email", "self.CPF_document_valid = '61367541000' self.CPF_document_invalid = '11111111111' self.CPF_document_invalid_MIN = '111111111' self.CPF_document_invalid_MAX = '11111111111' self.CPF_document_invalid_TYPE", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': 
self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_TYPE(self): form_data =", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self): form_data", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MIN, 'email': self.email_valid, 'password':", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MIN, 'password':", "= 'M' self.sex_invalid = 'A' self.CPF_document_valid = '61367541000' self.CPF_document_invalid = '11111111111' self.CPF_document_invalid_MIN =", "= {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user = User() user.email = \"<EMAIL>\" user.save() def test_forms_patient_is_valid(self): form_data = {'name':", "self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP':", "self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid_MAX} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", 
"<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid,", "<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid, 'UF': self.UF_valid,", "self.name_invalid_TYPE, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone':", "'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data)", "'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def", "'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood':", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city':", "= 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid = 'Rua 01, Quadra 10, Lote 15' self.complement_invalid = ''", "'''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user = User() user.email = \"<EMAIL>\" 
user.save() def test_forms_patient_is_valid(self): form_data", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "= 'Setor Leste' self.neighborhood_invalid = '' self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid = 'Rua 01,", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form", "= '<EMAIL>' self.email_invalid = 'admin.com' self.email_invalid_TYPE = 'admin.com' self.email_invalid_MIN = 'a@a.a' self.email_invalid_MAX =", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX, 'sex': self.sex_valid,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self):", "self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form =", "'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "= '11111111111' self.CPF_document_invalid_TYPE = '252627282930asdf' self.CEP_valid = 72850735 self.CEP_invalid = '7285073A' self.CEP_invalid_MIN =", 
"self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.email_invalid = 'admin.com' self.email_invalid_TYPE = 'admin.com' self.email_invalid_MIN = 'a@a.a' self.email_invalid_MAX = '<EMAIL>' self.email_invalid_BASE", "self.email_invalid_MIN = 'a@a.a' self.email_invalid_MAX = '<EMAIL>' self.email_invalid_BASE = '<EMAIL>' self.password_valid = '<PASSWORD>' self.password_invalid", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self): form_data = {'name': self.name_valid,", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "self.email_valid, 'password': self.password_valid, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid,", "'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid_MAX, 'complement': self.complement_valid} form = PatientForm(data=form_data)", "test_forms_patient_phone_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MAX, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>,", "form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MAX, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document':", 
"'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid, 'neighborhood':", "'<PASSWORD>!' self.date_of_birth_valid = '10/12/1990' self.date_of_birth_invalid = '18' self.date_of_birth_invalid_FORMAT = '18' self.date_of_birth_invalid_MIN = '10/12/2020'", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data", "self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid_MAX} form", "'18' self.date_of_birth_invalid_FORMAT = '18' self.date_of_birth_invalid_MIN = '10/12/2020' self.sex_valid = 'M' self.sex_invalid = 'A'", "'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "= 'Rua 01, Quadra 10, Lote 15' self.complement_invalid = '' self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid} form", "'city': self.city_valid, 
'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self): form_data", "'<EMAIL>' self.password_valid = '<PASSWORD>' self.password_invalid = '<PASSWORD>' self.password_invalid_MAX = '<PASSWORD>' self.password_invalid_MIN = '<PASSWORD>'", "= {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE,", "72850735 self.CEP_invalid = '7285073A' self.CEP_invalid_MIN = 42 self.CEP_invalid_MAX = 728507351 self.UF_valid = 'DF'", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid_MAX, 'complement': self.complement_valid}", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid_MAX, 'complement': self.complement_valid} form", "self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self): form_data = {'name': self.name_invalid_TYPE, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "django.test import TestCase from user.forms import PatientForm from user.models import User class TestPatientForm(TestCase):", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self): form_data = {'name':", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = 
PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self): form_data = {'name':", "self.neighborhood_invalid_MAX, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self): form_data = {'name': self.name_valid,", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone':", "= {'name': self.name_invalid_MAX, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid,", "self.name_invalid_MIN = 'a' self.phone_valid = '1234567890' self.phone_invalid = '456' self.phone_invalid_MIN = '456' self.phone_invalid_TYPE", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "def test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "'CEP': self.CEP_invalid_MAX, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data)", "{'name': self.name_valid, 'phone': self.phone_invalid_MIN, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "test_forms_patient_UF_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': 
self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self): form_data = {'name': self.name_invalid_MIN,", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid, 'UF': self.UF_valid,", "'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_invalid, 'date_of_birth':", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid_MAX, 'city':", "'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid, 'UF':", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self): form_data", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_TYPE, 'email': 
self.email_valid, 'password':", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self): form_data = {'name': self.name_invalid_MAX, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data = {'name': self.name_valid,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self): form_data = {'name':", "= '<EMAIL>' self.email_invalid_BASE = '<EMAIL>' self.password_valid = '<PASSWORD>' self.password_invalid = '<PASSWORD>' self.password_invalid_MAX =", "test_forms_patient_CEP_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.password_valid, 'confirm_password': <PASSWORD>,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self):", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "'email': 
self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP':", "'A' self.UF_invalid_MAX = 'AAA' self.city_valid = 'Bras lia' self.city_invalid = '' self.city_invalid_MAX =", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_sex_is_not_valid(self):", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'CPF_document': self.CPF_document_invalid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self): form_data =", "self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self): form_data = {'name': self.name_invalid_MAX, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_TYPE(self): form_data = {'name':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data =", "= '456' self.phone_invalid_MIN = '456' self.phone_invalid_TYPE = 'asdaaaaaads' self.phone_invalid_MAX = '456134564898761' 
self.email_valid =", "'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': self.<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth':", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self): form_data = {'name': self.name_valid, 'phone':", "self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "test_forms_patient_name_is_not_valid_TYPE(self): form_data = {'name': self.name_invalid_TYPE, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.name_valid = '<NAME>' self.name_invalid = 'a12' self.name_invalid_TYPE = 'a@hjasgdjasd1al' self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN", "'1234567890' self.phone_invalid = '456' self.phone_invalid_MIN = '456' self.phone_invalid_TYPE = 'asdaaaaaads' self.phone_invalid_MAX = '456134564898761'", "'CPF_document': self.CPF_document_valid, 'sex': self.sex_invalid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood':", "form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MIN, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document':", "form_data = {'name': self.name_invalid_MAX, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document':", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone':", "self.neighborhood_valid, 
'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self): form_data = {'name': self.name_valid,", "'' self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid = 'Rua 01, Quadra 10, Lote 15' self.complement_invalid", "self.CPF_document_invalid_MAX = '11111111111' self.CPF_document_invalid_TYPE = '252627282930asdf' self.CEP_valid = 72850735 self.CEP_invalid = '7285073A' self.CEP_invalid_MIN", "= 'Bras lia' self.city_invalid = '' self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid = 'Setor Leste'", "self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_BASE, 'password':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MIN(self): form_data =", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user = User() user.email = \"<EMAIL>\" user.save() def test_forms_patient_is_valid(self): form_data =", "self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid,", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': 
self.phone_valid, 'email': self.email_valid,", "self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form =", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self): form_data = {'name': self.name_valid,", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data = {'name': self.name_valid,", "'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self): form_data =", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "def test_forms_patient_CEP_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.password_valid, 'confirm_password':", "'asdaaaaaads' self.phone_invalid_MAX = '456134564898761' self.email_valid = '<EMAIL>' self.email_invalid = 'admin.com' self.email_invalid_TYPE = 'admin.com'", "= {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX,", "self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self): form_data = {'name': 
self.name_valid, 'phone': self.phone_invalid_MAX, 'email': self.email_valid, 'password': <PASSWORD>,", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone':", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self):", "def test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "def test_forms_patient_phone_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MAX, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation':", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.password_valid, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid, 'sex': self.sex_valid,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self):", "'phone': self.phone_valid, 'email': self.email_invalid_TYPE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth':", "'10/12/2020' self.sex_valid = 'M' self.sex_invalid = 'A' self.CPF_document_valid = '61367541000' self.CPF_document_invalid = '11111111111'", "self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.<PASSWORD>,", "'email': 
self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "'phone': self.phone_invalid_MIN, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth':", "self.CEP_invalid_MAX = 728507351 self.UF_valid = 'DF' self.UF_invalid = '' self.UF_invalid_MIN = 'A' self.UF_invalid_MAX", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone':", "self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid,", "self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid}", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MIN(self): form_data = {'name': self.name_valid,", "self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid_MAX}", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': 
self.phone_valid, 'email': self.email_valid,", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MAX, 'password':", "<PASSWORD>, 'CPF_document': self.CPF_document_invalid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid, 'UF': self.UF_valid, 'city': self.city_valid,", "{'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MAX, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", "= '<PASSWORD>!' self.date_of_birth_valid = '10/12/1990' self.date_of_birth_invalid = '18' self.date_of_birth_invalid_FORMAT = '18' self.date_of_birth_invalid_MIN =", "= '<PASSWORD>' self.password_invalid_MIN = '<PASSWORD>' self.password_invalid_TYPE = '<PASSWORD>!' 
self.date_of_birth_valid = '10/12/1990' self.date_of_birth_invalid =", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid(self): form_data = {'name':", "self.phone_valid, 'email': self.email_invalid_TYPE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "Lote 15' self.complement_invalid = '' self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user = User()", "self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid}", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MAX(self): form_data = {'name':", "form_data = {'name': self.name_invalid_MIN, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': self.<PASSWORD>, 'CPF_document':", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "self.phone_valid, 'email': self.email_invalid_MAX, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) 
self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self):", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data = {'name':", "self.neighborhood_valid = 'Setor Leste' self.neighborhood_invalid = '' self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid = 'Rua", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone':", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "= 42 self.CEP_invalid_MAX = 728507351 self.UF_valid = 'DF' self.UF_invalid = '' self.UF_invalid_MIN =", "= 'A' self.UF_invalid_MAX = 'AAA' self.city_valid = 'Bras lia' self.city_invalid = '' self.city_invalid_MAX", "form_data = {'name': self.name_valid, 'phone': self.phone_invalid_TYPE, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document':", "'email': self.email_valid, 'password': self.<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self): form_data = {'name': self.name_valid,", "'city': self.city_valid, 
'neighborhood': self.neighborhood_invalid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MIN, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "test_forms_patient_neighborhood_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE, 'sex': self.sex_valid,", "<PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid,", "self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", "= '<EMAIL>' self.password_valid = '<PASSWORD>' self.password_invalid = '<PASSWORD>' self.password_invalid_MAX = '<PASSWORD>' self.password_invalid_MIN =", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self): form_data =", "'<EMAIL>' self.email_invalid = 'admin.com' self.email_invalid_TYPE = 'admin.com' self.email_invalid_MIN = 'a@a.a' self.email_invalid_MAX = '<EMAIL>'", 
"self.complement_valid = 'Rua 01, Quadra 10, Lote 15' self.complement_invalid = '' self.complement_invalid_MAX =", "<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX, 'UF': self.UF_valid,", "01, Quadra 10, Lote 15' self.complement_invalid = '' self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'''", "test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self): form_data = {'name': self.name_valid,", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'city': self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid(self): form_data", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MIN, 'email':", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid, 'complement': self.complement_valid} form", "= 'admin.com' self.email_invalid_TYPE = 'admin.com' self.email_invalid_MIN = 'a@a.a' 
self.email_invalid_MAX = '<EMAIL>' self.email_invalid_BASE =", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self):", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid_MAX, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form", "= 'a' self.phone_valid = '1234567890' self.phone_invalid = '456' self.phone_invalid_MIN = '456' self.phone_invalid_TYPE =", "test_forms_patient_password_confirmation_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "def test_forms_patient_password_confirmation_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid = 'Rua 01, Quadra 10, Lote 15' self.complement_invalid =", "= 728507351 self.UF_valid = 'DF' self.UF_invalid = '' self.UF_invalid_MIN = 'A' self.UF_invalid_MAX =", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid, 'UF': self.UF_valid, 'city':", "self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX,", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self): form_data = {'name': self.name_invalid_MIN, 'phone': self.phone_valid, 'email':", "'neighborhood': self.neighborhood_valid, 
'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self): form_data = {'name':", "<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid,", "'a@hjasgdjasd1al' self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN = 'a' self.phone_valid = '1234567890' self.phone_invalid = '456'", "def test_forms_patient_neighborhood_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MAX, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document':", "self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "'' self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user = User() user.email = \"<EMAIL>\" user.save()", "TestCase from user.forms import PatientForm from user.models import User class TestPatientForm(TestCase): def setUp(self):", "'456134564898761' self.email_valid = '<EMAIL>' self.email_invalid = 'admin.com' self.email_invalid_TYPE = 'admin.com' self.email_invalid_MIN = 'a@a.a'", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", 
"'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid = 'Rua 01, Quadra 10, Lote 15' self.complement_invalid = '' self.complement_invalid_MAX", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self):", "test_forms_patient_CEP_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "import TestCase from user.forms import PatientForm from user.models import User class TestPatientForm(TestCase): def", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "self.phone_invalid_MIN = '456' self.phone_invalid_TYPE = 'asdaaaaaads' self.phone_invalid_MAX = '456134564898761' self.email_valid = '<EMAIL>' self.email_invalid", "self.neighborhood_invalid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data = {'name': self.name_valid,", "self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_TYPE, 'email': self.email_valid, 'password': <PASSWORD>,", "'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid, 'complement':", "self.CEP_invalid = '7285073A' self.CEP_invalid_MIN = 42 self.CEP_invalid_MAX = 728507351 self.UF_valid = 'DF' self.UF_invalid", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid} form = 
PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self): form_data =", "self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form =", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MAX, 'email': self.email_valid,", "self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form =", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_sex_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "'phone': self.phone_valid, 'email': self.email_valid, 'password': self.<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth':", "self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_invalid, 'date_of_birth': self.date_of_birth_valid,", "self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid}", "'password': <PASSWORD>, 'confirm_password': self.<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF':", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MAX, 'email':", "'password': <PASSWORD>, 'confirm_password': 
<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_invalid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF':", "'UF': self.UF_valid, 'city': self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def", "self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_TYPE, 'email': self.email_valid,", "user.models import User class TestPatientForm(TestCase): def setUp(self): self.name_valid = '<NAME>' self.name_invalid = 'a12'", "'' self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid = 'Setor Leste' self.neighborhood_invalid = '' self.neighborhood_invalid_MAX =", "'Setor Leste' self.neighborhood_invalid = '' self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid = 'Rua 01, Quadra", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data", "self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid_MAX, 'complement': self.complement_valid} form =", "= '18' self.date_of_birth_invalid_MIN = '10/12/2020' self.sex_valid = 'M' self.sex_invalid = 'A' self.CPF_document_valid =", "self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN, 'sex': self.sex_valid, 
'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX, 'sex': self.sex_valid, 'date_of_birth':", "= {'name': self.name_invalid_MIN, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': self.<PASSWORD>, 'CPF_document': self.CPF_document_valid,", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city':", "test_forms_patient_UF_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid_MAX, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid(self): form_data = {'name':", "self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid_MAX,", "{'name': self.name_invalid_TYPE, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def 
test_forms_patient_sex_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data =", "self.email_invalid_TYPE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "'city': self.city_valid, 'neighborhood': self.neighborhood_invalid_MAX, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self): form_data", "self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_MIN(self):", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "self.name_invalid = 'a12' self.name_invalid_TYPE = 'a@hjasgdjasd1al' self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN = 'a' self.phone_valid", "PatientForm(data=form_data) 
self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self): form_data = {'name': self.name_valid,", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "self.UF_invalid_MAX = 'AAA' self.city_valid = 'Bras lia' self.city_invalid = '' self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'", "form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document':", "from django.test import TestCase from user.forms import PatientForm from user.models import User class", "self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form =", "'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood':", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self): form_data = {'name': self.name_invalid_MIN, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "self.email_invalid_TYPE = 'admin.com' self.email_invalid_MIN = 'a@a.a' self.email_invalid_MAX = '<EMAIL>' self.email_invalid_BASE = '<EMAIL>' self.password_valid", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def 
test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self): form_data =", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self):", "self.CPF_document_invalid_MIN, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "'CPF_document': self.CPF_document_invalid_MAX, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood':", "self.phone_valid, 'email': self.email_invalid_BASE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "self.date_of_birth_invalid_FORMAT = '18' self.date_of_birth_invalid_MIN = '10/12/2020' self.sex_valid = 'M' self.sex_invalid = 'A' self.CPF_document_valid", "self.name_invalid_TYPE = 'a@hjasgdjasd1al' self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN = 'a' self.phone_valid = 
'1234567890' self.phone_invalid", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data = {'name':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MIN, 'email': self.email_valid,", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self): form_data =", "'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF':", "= '<NAME>' self.name_invalid = 'a12' self.name_invalid_TYPE = 'a@hjasgdjasd1al' self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN =", "self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid}", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "self.phone_invalid_TYPE = 'asdaaaaaads' self.phone_invalid_MAX = '456134564898761' self.email_valid = '<EMAIL>' self.email_invalid = 'admin.com' self.email_invalid_TYPE", "PatientForm(data=form_data) 
self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_TYPE, 'password':", "test_forms_patient_city_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.phone_valid, 'email': self.email_valid, 'password': self.<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "test_forms_patient_sex_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid_MAX, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city':", "'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX, 'UF':", "'email': self.email_valid, 'password': self.password_valid, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone':", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def 
test_forms_patient_email_is_not_valid_MAX(self): form_data = {'name': self.name_valid,", "class TestPatientForm(TestCase): def setUp(self): self.name_valid = '<NAME>' self.name_invalid = 'a12' self.name_invalid_TYPE = 'a@hjasgdjasd1al'", "self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid = 'Setor Leste' self.neighborhood_invalid = '' self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX, 'UF': self.UF_valid, 'city':", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self): form_data = {'name':", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid} form = PatientForm(data=form_data)", "'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid, 'complement': self.complement_valid} form = PatientForm(data=form_data)", "= {'name': self.name_valid, 'phone': self.phone_invalid_MIN, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = 
PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MAX(self): form_data", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_sex_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "self.CEP_valid = 72850735 self.CEP_invalid = '7285073A' self.CEP_invalid_MIN = 42 self.CEP_invalid_MAX = 728507351 self.UF_valid", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self): form_data = {'name': self.name_valid, 'phone':", "{'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.password_valid, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid, 'sex':", "def test_forms_patient_password_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.<PASSWORD>, 'confirm_password':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self): form_data =", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid(self): form_data = {'name': self.name_valid, 'phone':", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self): form_data", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self): form_data = {'name': self.name_valid, 'phone':", "'confirm_password': 
self.<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city':", "def test_forms_patient_email_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_TYPE, 'password': <PASSWORD>, 'confirm_password':", "self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "test_forms_patient_city_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", "'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid, 'city': self.city_valid, 'neighborhood':", "self.password_invalid_TYPE = '<PASSWORD>!' 
self.date_of_birth_valid = '10/12/1990' self.date_of_birth_invalid = '18' self.date_of_birth_invalid_FORMAT = '18' self.date_of_birth_invalid_MIN", "test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.name_valid, 'phone': self.phone_invalid_MAX, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "= 'a12' self.name_invalid_TYPE = 'a@hjasgdjasd1al' self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN = 'a' self.phone_valid =", "{'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_invalid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid,", "self.email_invalid_MAX, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "'email': self.email_invalid_MAX, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "self.password_valid = '<PASSWORD>' self.password_invalid = '<PASSWORD>' self.password_invalid_MAX = '<PASSWORD>' self.password_invalid_MIN = '<PASSWORD>' self.password_invalid_TYPE", "PatientForm(data=form_data) 
self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "self.UF_invalid_MIN = 'A' self.UF_invalid_MAX = 'AAA' self.city_valid = 'Bras lia' self.city_invalid = ''", "'UF': self.UF_invalid_MAX, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def", "= '61367541000' self.CPF_document_invalid = '11111111111' self.CPF_document_invalid_MIN = '111111111' self.CPF_document_invalid_MAX = '11111111111' self.CPF_document_invalid_TYPE =", "self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form =", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self): form_data", "= \"<EMAIL>\" user.save() def test_forms_patient_is_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self): form_data = {'name': self.name_valid,", "'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city':", "<PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid,", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def 
test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self): form_data = {'name': self.name_invalid_MIN, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "'' self.UF_invalid_MIN = 'A' self.UF_invalid_MAX = 'AAA' self.city_valid = 'Bras lia' self.city_invalid =", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self): form_data = {'name':", "'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MIN,", "{'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", "self.city_valid, 'neighborhood': self.neighborhood_valid, 
'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data =", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self): form_data =", "self.CEP_invalid_MAX, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", "test_forms_patient_email_is_not_valid_BASE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_BASE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self):", "= '<PASSWORD>' self.password_invalid = '<PASSWORD>' self.password_invalid_MAX = '<PASSWORD>' self.password_invalid_MIN = '<PASSWORD>' self.password_invalid_TYPE =", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone':", "self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MIN, 'password': <PASSWORD>,", "self.CPF_document_invalid_TYPE, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "= {'name': self.name_valid, 'phone': self.phone_invalid_TYPE, 'email': self.email_valid, 'password': <PASSWORD>, 
'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "self.city_valid = 'Bras lia' self.city_invalid = '' self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid = 'Setor", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self): form_data = {'name': self.name_invalid_MIN, 'phone':", "= 'asdaaaaaads' self.phone_invalid_MAX = '456134564898761' self.email_valid = '<EMAIL>' self.email_invalid = 'admin.com' self.email_invalid_TYPE =", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone':", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "'CPF_document': self.CPF_document_invalid_TYPE, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood':", "test_forms_patient_phone_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': 
self.phone_invalid_TYPE, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self):", "self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data = {'name': self.name_valid,", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "def test_forms_patient_name_is_not_valid_TYPE(self): form_data = {'name': self.name_invalid_TYPE, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "<PASSWORD>, 'confirm_password': self.<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid,", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': 
self.phone_valid, 'email': self.email_valid,", "'<PASSWORD>' self.password_invalid = '<PASSWORD>' self.password_invalid_MAX = '<PASSWORD>' self.password_invalid_MIN = '<PASSWORD>' self.password_invalid_TYPE = '<PASSWORD>!'", "'email': self.email_invalid_MIN, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "self.email_valid, 'password': self.<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "self.city_valid, 'neighborhood': self.neighborhood_invalid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data =", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self): form_data = {'name': self.name_invalid_MAX, 'phone': self.phone_valid, 'email':", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self): form_data = {'name':", "form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_TYPE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document':", "'email': self.email_invalid_TYPE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "'phone': self.phone_valid, 'email': self.email_invalid_MAX, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth':", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX, 'UF': self.UF_valid, 'city': 
self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form", "'a' self.phone_valid = '1234567890' self.phone_invalid = '456' self.phone_invalid_MIN = '456' self.phone_invalid_TYPE = 'asdaaaaaads'", "self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_BASE, 'password': <PASSWORD>,", "<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid,", "'A' self.CPF_document_valid = '61367541000' self.CPF_document_invalid = '11111111111' self.CPF_document_invalid_MIN = '111111111' self.CPF_document_invalid_MAX = '11111111111'", "'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "self.complement_valid} form = PatientForm(data=form_data) self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self): form_data = {'name': self.name_invalid_TYPE, 'phone': self.phone_valid,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data = {'name':", "self.neighborhood_invalid = '' self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid = 'Rua 01, Quadra 10, Lote", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self):", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} 
form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_sex_is_not_valid(self): form_data =", "'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid_MAX} form = PatientForm(data=form_data)", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_TYPE(self): form_data = {'name': self.name_valid,", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_TYPE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self): form_data = {'name':", "self.CPF_document_invalid_TYPE = '252627282930asdf' self.CEP_valid = 72850735 self.CEP_invalid = '7285073A' self.CEP_invalid_MIN = 42 self.CEP_invalid_MAX", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_invalid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city':", "'CPF_document': self.CPF_document_invalid_MIN, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood':", "'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid = 'Setor Leste' self.neighborhood_invalid = '' self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid =", "'phone': self.phone_valid, 'email': self.email_valid, 'password': self.password_valid, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid, 'sex': self.sex_valid, 'date_of_birth':", "self.sex_valid, 
'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid, 'complement': self.complement_valid}", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_BASE,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self):", "'7285073A' self.CEP_invalid_MIN = 42 self.CEP_invalid_MAX = 728507351 self.UF_valid = 'DF' self.UF_invalid = ''", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "'<NAME>' self.name_invalid = 'a12' self.name_invalid_TYPE = 'a@hjasgdjasd1al' self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN = 'a'", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "self.assertFalse(form.is_valid()) def test_forms_patient_sex_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': 
self.email_valid, 'password': <PASSWORD>,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self): form_data = {'name':", "self.complement_invalid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "self.<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid,", "def test_forms_patient_complement_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self):", "'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood':", "form = PatientForm(data=form_data) self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self): form_data = {'name': self.name_invalid_TYPE, 'phone': self.phone_valid, 'email':", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self): form_data = {'name': self.name_invalid_TYPE,", "'M' self.sex_invalid = 'A' self.CPF_document_valid = '61367541000' self.CPF_document_invalid = '11111111111' self.CPF_document_invalid_MIN = '111111111'", "self.phone_valid, 'email': self.email_invalid_MIN, 'password': <PASSWORD>, 
'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self): form_data = {'name': self.name_valid, 'phone':", "self.UF_invalid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self):", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form", "self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid_MAX, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid}", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MIN,", "'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid, 'complement':", "def test_forms_patient_email_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MAX, 'password': <PASSWORD>, 'confirm_password':", "form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document':", "self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': 
<PASSWORD>,", "self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "= '7285073A' self.CEP_invalid_MIN = 42 self.CEP_invalid_MAX = 728507351 self.UF_valid = 'DF' self.UF_invalid =", "'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement':", "'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE, 'sex': self.sex_valid, 'date_of_birth':", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self): form_data", "'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF':", "= 'a@a.a' self.email_invalid_MAX = '<EMAIL>' self.email_invalid_BASE = '<EMAIL>' self.password_valid = '<PASSWORD>' self.password_invalid =", "self.<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid,", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", "def test_forms_patient_complement_is_not_valid(self): 
form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "= '' self.UF_invalid_MIN = 'A' self.UF_invalid_MAX = 'AAA' self.city_valid = 'Bras lia' self.city_invalid", "form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MIN, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document':", "self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertTrue(form.is_valid())", "def test_forms_patient_password_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.phone_valid = '1234567890' self.phone_invalid = '456' self.phone_invalid_MIN = '456' self.phone_invalid_TYPE = 'asdaaaaaads' self.phone_invalid_MAX", "{'name': self.name_invalid_MAX, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_TYPE,", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "self.email_invalid_BASE = '<EMAIL>' self.password_valid = '<PASSWORD>' self.password_invalid = '<PASSWORD>' self.password_invalid_MAX = '<PASSWORD>' self.password_invalid_MIN", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 
'email': self.email_valid,", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self): form_data =", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self): form_data = {'name': self.name_invalid_MIN, 'phone': self.phone_valid, 'email': self.email_valid,", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self): form_data", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self): form_data = {'name': self.name_valid,", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self): form_data =", "'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid_MAX, 'neighborhood':", "'phone': self.phone_valid, 'email': self.email_invalid_MIN, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth':", "= '111111111' self.CPF_document_invalid_MAX = '11111111111' self.CPF_document_invalid_TYPE = '252627282930asdf' self.CEP_valid = 72850735 self.CEP_invalid =", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data = {'name':", "self.neighborhood_valid, 
'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self): form_data = {'name': self.name_invalid_MAX,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_TYPE(self):", "self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid}", "= '<PASSWORD>' self.password_invalid_TYPE = '<PASSWORD>!' self.date_of_birth_valid = '10/12/1990' self.date_of_birth_invalid = '18' self.date_of_birth_invalid_FORMAT =", "def test_forms_patient_is_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "user.forms import PatientForm from user.models import User class TestPatientForm(TestCase): def setUp(self): self.name_valid =", "form_data = {'name': self.name_invalid_TYPE, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document':", "'Bras lia' self.city_invalid = '' self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid = 'Setor Leste' self.neighborhood_invalid", "{'name': self.name_invalid_MIN, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': self.<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data =", "'neighborhood': self.neighborhood_valid, 'complement': 
self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self): form_data = {'name':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self): form_data =", "= '456134564898761' self.email_valid = '<EMAIL>' self.email_invalid = 'admin.com' self.email_invalid_TYPE = 'admin.com' self.email_invalid_MIN =", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_sex_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self): form_data", "self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid}", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "'complement': self.complement_invalid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone':", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data = {'name': 
self.name_valid, 'phone': self.phone_valid, 'email':", "Leste' self.neighborhood_invalid = '' self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid = 'Rua 01, Quadra 10,", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MAX, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "'phone': self.phone_valid, 'email': self.email_invalid_BASE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth':", "= User() user.email = \"<EMAIL>\" user.save() def test_forms_patient_is_valid(self): form_data = {'name': self.name_valid, 'phone':", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "def test_forms_patient_city_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "test_forms_patient_email_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MAX, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "'252627282930asdf' self.CEP_valid = 72850735 self.CEP_invalid = '7285073A' self.CEP_invalid_MIN = 42 self.CEP_invalid_MAX = 728507351", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self): form_data = {'name': self.name_valid,", "'complement': self.complement_valid} form = 
PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_sex_is_not_valid(self): form_data = {'name': self.name_valid, 'phone':", "'a12' self.name_invalid_TYPE = 'a@hjasgdjasd1al' self.name_invalid_MAX = 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN = 'a' self.phone_valid = '1234567890'", "'UF': self.UF_valid, 'city': self.city_invalid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def", "test_forms_patient_complement_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid,", "= '252627282930asdf' self.CEP_valid = 72850735 self.CEP_invalid = '7285073A' self.CEP_invalid_MIN = 42 self.CEP_invalid_MAX =", "= {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_TYPE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MIN(self): form_data = {'name':", "test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "<PASSWORD>, 'confirm_password': <PASSWORD>, 
'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid_MAX,", "<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid_MAX, 'city': self.city_valid,", "def test_forms_patient_phone_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MIN, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "{'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_BASE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "lia' self.city_invalid = '' self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid = 'Setor Leste' self.neighborhood_invalid =", "'phone': self.phone_invalid_MAX, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth':", "'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN, 'sex': self.sex_valid, 'date_of_birth':", "self.CEP_invalid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self): form_data =", 
"= {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN,", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form", "self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid,", "'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement':", "= '1234567890' self.phone_invalid = '456' self.phone_invalid_MIN = '456' self.phone_invalid_TYPE = 'asdaaaaaads' self.phone_invalid_MAX =", "self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MIN, 'email': self.email_valid, 'password': <PASSWORD>,", "def test_forms_patient_sex_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "= {'name': 
self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.password_valid, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid,", "form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_BASE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document':", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid_MAX, 'city': self.city_valid, 'neighborhood':", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self): form_data = {'name': self.name_valid,", "<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX, 'UF': self.UF_valid, 'city': self.city_valid,", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self): form_data = {'name': self.name_valid, 'phone':", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data = {'name': self.name_valid, 'phone':", "= '456' self.phone_invalid_TYPE = 'asdaaaaaads' self.phone_invalid_MAX = '456134564898761' self.email_valid = '<EMAIL>' self.email_invalid =", "= '10/12/2020' self.sex_valid = 'M' self.sex_invalid = 'A' self.CPF_document_valid = '61367541000' self.CPF_document_invalid =", "self.neighborhood_valid, 'complement': self.complement_valid} form = 
PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self): form_data = {'name': self.name_valid,", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self): form_data = {'name':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_TYPE,", "'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid, 'UF':", "self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "42 self.CEP_invalid_MAX = 728507351 self.UF_valid = 'DF' self.UF_invalid = '' self.UF_invalid_MIN = 'A'", "self.date_of_birth_valid = '10/12/1990' self.date_of_birth_invalid = '18' self.date_of_birth_invalid_FORMAT = '18' self.date_of_birth_invalid_MIN = '10/12/2020' self.sex_valid", "self.name_valid, 'phone': 
self.phone_invalid_TYPE, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "'18' self.date_of_birth_invalid_MIN = '10/12/2020' self.sex_valid = 'M' self.sex_invalid = 'A' self.CPF_document_valid = '61367541000'", "= 'DF' self.UF_invalid = '' self.UF_invalid_MIN = 'A' self.UF_invalid_MAX = 'AAA' self.city_valid =", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid, 'city': self.city_valid,", "PatientForm(data=form_data) self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self): form_data = {'name': self.name_invalid_TYPE, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "def test_forms_patient_phone_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_TYPE, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation':", "def test_forms_patient_UF_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "self.CPF_document_invalid = '11111111111' self.CPF_document_invalid_MIN = '111111111' self.CPF_document_invalid_MAX = '11111111111' 
self.CPF_document_invalid_TYPE = '252627282930asdf' self.CEP_valid", "test_forms_patient_is_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "def test_forms_patient_email_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MIN, 'password': <PASSWORD>, 'confirm_password':", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form =", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data = {'name': self.name_valid,", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid(self): form_data = {'name': self.name_valid,", "def test_forms_patient_city_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone':", "self.UF_invalid = '' self.UF_invalid_MIN = 'A' self.UF_invalid_MAX = 'AAA' 
self.city_valid = 'Bras lia'", "user.save() def test_forms_patient_is_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "'111111111' self.CPF_document_invalid_MAX = '11111111111' self.CPF_document_invalid_TYPE = '252627282930asdf' self.CEP_valid = 72850735 self.CEP_invalid = '7285073A'", "self.CEP_valid, 'UF': self.UF_invalid_MAX, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", "{'name': self.name_valid, 'phone': self.phone_invalid_MAX, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid(self): form_data =", "self.date_of_birth_invalid = '18' self.date_of_birth_invalid_FORMAT = '18' self.date_of_birth_invalid_MIN = '10/12/2020' self.sex_valid = 'M' self.sex_invalid", "= 'AAA' self.city_valid = 'Bras lia' self.city_invalid = '' self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_BASE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "self.password_invalid_MAX = '<PASSWORD>' self.password_invalid_MIN = '<PASSWORD>' self.password_invalid_TYPE = '<PASSWORD>!' 
self.date_of_birth_valid = '10/12/1990' self.date_of_birth_invalid", "self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_TYPE, 'password': <PASSWORD>,", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "= '11111111111' self.CPF_document_invalid_MIN = '111111111' self.CPF_document_invalid_MAX = '11111111111' self.CPF_document_invalid_TYPE = '252627282930asdf' self.CEP_valid =", "'phone': self.phone_invalid_TYPE, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth':", "'<PASSWORD>' self.password_invalid_MIN = '<PASSWORD>' self.password_invalid_TYPE = '<PASSWORD>!' self.date_of_birth_valid = '10/12/1990' self.date_of_birth_invalid = '18'", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self):", "test_forms_patient_password_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.<PASSWORD>, 'confirm_password': <PASSWORD>,", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data = {'name': self.name_valid,", "self.CPF_document_invalid_MIN = '111111111' self.CPF_document_invalid_MAX = '11111111111' self.CPF_document_invalid_TYPE = '252627282930asdf' self.CEP_valid = 72850735 self.CEP_invalid", "'sex': self.sex_invalid, 'date_of_birth': self.date_of_birth_valid, 'CEP': 
self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement':", "= 'A' self.CPF_document_valid = '61367541000' self.CPF_document_invalid = '11111111111' self.CPF_document_invalid_MIN = '111111111' self.CPF_document_invalid_MAX =", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid(self): form_data", "self.password_invalid_MIN = '<PASSWORD>' self.password_invalid_TYPE = '<PASSWORD>!' self.date_of_birth_valid = '10/12/1990' self.date_of_birth_invalid = '18' self.date_of_birth_invalid_FORMAT", "'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement':", "= {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MIN, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "from user.forms import PatientForm from user.models import User class TestPatientForm(TestCase): def setUp(self): self.name_valid", "self.password_valid, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid, 'UF': self.UF_valid,", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def 
test_forms_patient_date_of_birth_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city':", "self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.password_valid,", "728507351 self.UF_valid = 'DF' self.UF_invalid = '' self.UF_invalid_MIN = 'A' self.UF_invalid_MAX = 'AAA'", "self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid_MAX} form =", "self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_invalid,", "self.email_valid = '<EMAIL>' self.email_invalid = 'admin.com' self.email_invalid_TYPE = 'admin.com' self.email_invalid_MIN = 'a@a.a' self.email_invalid_MAX", "= 'admin.com' self.email_invalid_MIN = 'a@a.a' self.email_invalid_MAX = '<EMAIL>' self.email_invalid_BASE = '<EMAIL>' self.password_valid =", "self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "import User class TestPatientForm(TestCase): def setUp(self): self.name_valid = '<NAME>' self.name_invalid = 'a12' self.name_invalid_TYPE", "self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid, 'complement': self.complement_valid} form =", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) 
self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self): form_data = {'name':", "self.phone_invalid_MAX = '456134564898761' self.email_valid = '<EMAIL>' self.email_invalid = 'admin.com' self.email_invalid_TYPE = 'admin.com' self.email_invalid_MIN", "Quadra 10, Lote 15' self.complement_invalid = '' self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone':", "def test_forms_patient_name_is_not_valid_MIN(self): form_data = {'name': self.name_invalid_MIN, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "'<PASSWORD>' self.password_invalid_TYPE = '<PASSWORD>!' 
self.date_of_birth_valid = '10/12/1990' self.date_of_birth_invalid = '18' self.date_of_birth_invalid_FORMAT = '18'", "'Rua 01, Quadra 10, Lote 15' self.complement_invalid = '' self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "'admin.com' self.email_invalid_TYPE = 'admin.com' self.email_invalid_MIN = 'a@a.a' self.email_invalid_MAX = '<EMAIL>' self.email_invalid_BASE = '<EMAIL>'", "{'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX, 'sex':", "= '18' self.date_of_birth_invalid_FORMAT = '18' self.date_of_birth_invalid_MIN = '10/12/2020' self.sex_valid = 'M' self.sex_invalid =", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_TYPE(self):", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "self.city_invalid = '' self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid = 'Setor Leste' self.neighborhood_invalid = ''", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone':", "'CEP': self.CEP_valid, 'UF': self.UF_invalid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data)", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = 
PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self): form_data", "self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid}", "self.CPF_document_valid, 'sex': self.sex_invalid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid,", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self): form_data", "test_forms_patient_phone_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MIN, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>,", "self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_invalid} form =", "self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MAX, 'password': <PASSWORD>,", "= 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid = 'Setor Leste' self.neighborhood_invalid = '' self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 
'phone': self.phone_valid, 'email': self.email_valid,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MIN(self): form_data = {'name':", "'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertTrue(form.is_valid()) def", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_TYPE, 'email':", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self): form_data =", "self.CEP_invalid_MIN = 42 self.CEP_invalid_MAX = 728507351 self.UF_valid = 'DF' self.UF_invalid = '' self.UF_invalid_MIN", "= 'aasdkgasghdhjadjasvdashdjavcdbnmhasdvbdmmasbdnmhamsjdhgegdhjgsavdhabvdbnasd' self.name_invalid_MIN = 'a' self.phone_valid = '1234567890' self.phone_invalid = '456' self.phone_invalid_MIN =", "test_forms_patient_complement_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self):", "def test_forms_patient_UF_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': 
self.UF_valid, 'city': self.city_invalid, 'neighborhood': self.neighborhood_valid,", "self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self): form_data = {'name': self.name_valid,", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': self.password_valid, 'confirm_password': <PASSWORD>, 'CPF_document':", "self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid,", "test_forms_patient_email_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_TYPE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "= {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MAX, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "self.neighborhood_valid, 'complement': self.complement_invalid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid_MAX(self): form_data = {'name': self.name_valid,", "<PASSWORD>, 'CPF_document': self.CPF_document_invalid_MAX, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid,", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_TYPE(self): form_data", "{'name': self.name_valid, 'phone': 
self.phone_valid, 'email': self.email_invalid_MIN, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_invalid_MAX, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid}", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid(self): form_data = {'name': self.name_valid, 'phone':", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city':", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MAX(self):", "'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid_MAX, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def", "= '' self.neighborhood_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.complement_valid = 'Rua 01, Quadra 10, Lote 15'", "self.name_invalid_MAX, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "self.phone_invalid_MIN, 
'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid, 'neighborhood': self.neighborhood_valid, 'complement':", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self): form_data = {'name': self.name_invalid_MAX, 'phone':", "self.city_invalid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self): form_data =", "10, Lote 15' self.complement_invalid = '' self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user =", "'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement':", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MAX(self): form_data = {'name': self.name_invalid_MAX, 'phone': self.phone_valid,", "import PatientForm from user.models import User class TestPatientForm(TestCase): def setUp(self): self.name_valid = '<NAME>'", "self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self): form_data = {'name': 
self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "self.city_invalid_MAX, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid(self): form_data =", "'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "'admin.com' self.email_invalid_MIN = 'a@a.a' self.email_invalid_MAX = '<EMAIL>' self.email_invalid_BASE = '<EMAIL>' self.password_valid = '<PASSWORD>'", "<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid_MAX,", "self.sex_valid = 'M' self.sex_invalid = 'A' self.CPF_document_valid = '61367541000' self.CPF_document_invalid = '11111111111' self.CPF_document_invalid_MIN", "'61367541000' self.CPF_document_invalid = '11111111111' self.CPF_document_invalid_MIN = '111111111' self.CPF_document_invalid_MAX = '11111111111' self.CPF_document_invalid_TYPE = '252627282930asdf'", "def test_forms_patient_password_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "'date_of_birth': self.date_of_birth_invalid_FORMAT, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form", "self.name_invalid_MIN, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': self.<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid,", "self.email_invalid_MAX = '<EMAIL>' self.email_invalid_BASE = '<EMAIL>' self.password_valid = '<PASSWORD>' self.password_invalid = 
'<PASSWORD>' self.password_invalid_MAX", "'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self): form_data =", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_TYPE(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertTrue(form.is_valid()) def test_forms_patient_name_is_not_valid_TYPE(self): form_data = {'name': self.name_invalid_TYPE, 'phone':", "self.UF_valid = 'DF' self.UF_invalid = '' self.UF_invalid_MIN = 'A' self.UF_invalid_MAX = 'AAA' self.city_valid", "self.phone_invalid_TYPE, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_invalid_MAX,", "self.UF_valid, 'city': self.city_invalid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self):", "self.phone_invalid_MAX, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': 
self.neighborhood_invalid_MAX, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid())", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid_MAX, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self):", "= {'name': self.name_invalid_TYPE, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "'456' self.phone_invalid_MIN = '456' self.phone_invalid_TYPE = 'asdaaaaaads' self.phone_invalid_MAX = '456134564898761' self.email_valid = '<EMAIL>'", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MAX,", "'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF':", "'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_invalid, 'city':", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "'AAA' self.city_valid = 'Bras lia' self.city_invalid = '' self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid =", "'email': self.email_invalid_BASE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': 
self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "= {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_BASE, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid_MAX(self): form_data", "def test_forms_patient_CPF_document_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password':", "self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data", "{'name': self.name_valid, 'phone': self.phone_invalid_TYPE, 'email': self.email_valid, 'password': <PASSWORD>, 'password_confirmation': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex':", "= {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_name_is_not_valid_MIN(self): form_data = {'name':", "= '' self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user = User() user.email = \"<EMAIL>\"", "form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email':", "self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "self.phone_valid, 'email': self.email_valid, 'password': self.password_valid, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid,", "self.date_of_birth_invalid_MIN = '10/12/2020' self.sex_valid = 'M' self.sex_invalid = 'A' self.CPF_document_valid = '61367541000' self.CPF_document_invalid", "<PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_invalid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid,", "self.sex_invalid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid}", "self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_FORMAT,", "'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_invalid, 'date_of_birth': self.date_of_birth_valid, 'CEP':", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_BASE(self): form_data", "self.city_valid, 'neighborhood': 
self.neighborhood_invalid_MAX, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self): form_data =", "'neighborhood': self.neighborhood_invalid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_neighborhood_is_not_valid_MAX(self): form_data = {'name':", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CPF_document_is_not_valid_MAX(self): form_data", "self.password_invalid = '<PASSWORD>' self.password_invalid_MAX = '<PASSWORD>' self.password_invalid_MIN = '<PASSWORD>' self.password_invalid_TYPE = '<PASSWORD>!' self.date_of_birth_valid", "<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_invalid_MIN, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid,", "'neighborhood': self.neighborhood_invalid_MAX, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_complement_is_not_valid(self): form_data = {'name':", "test_forms_patient_email_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_invalid_MIN, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "<PASSWORD>, 'CPF_document': self.CPF_document_valid, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid,", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_date_of_birth_is_not_valid_FORMAT(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def 
test_forms_patient_neighborhood_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MIN(self): form_data", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_UF_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "test_forms_patient_name_is_not_valid_MAX(self): form_data = {'name': self.name_invalid_MAX, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self): form_data = {'name':", "PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_CEP_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password':", "self.complement_invalid = '' self.complement_invalid_MAX = '''aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa''' user = User() user.email =", "= '' self.city_invalid_MAX = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' self.neighborhood_valid = 'Setor Leste' self.neighborhood_invalid = '' self.neighborhood_invalid_MAX", "{'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE, 'sex':", "self.complement_valid} form = PatientForm(data=form_data) 
self.assertFalse(form.is_valid()) def test_forms_patient_city_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "'10/12/1990' self.date_of_birth_invalid = '18' self.date_of_birth_invalid_FORMAT = '18' self.date_of_birth_invalid_MIN = '10/12/2020' self.sex_valid = 'M'", "= PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_password_is_not_valid_MIN(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid,", "self.assertFalse(form.is_valid()) def test_forms_patient_password_confirmation_is_not_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>,", "def setUp(self): self.name_valid = '<NAME>' self.name_invalid = 'a12' self.name_invalid_TYPE = 'a@hjasgdjasd1al' self.name_invalid_MAX =", "self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_email_is_not_valid_MAX(self): form_data =", "'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_MIN, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF':", "'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_invalid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form", "self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>, 'CPF_document': self.CPF_document_invalid_TYPE, 'sex': self.sex_valid, 'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid,", "self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_valid, 'complement': self.complement_valid} form = PatientForm(data=form_data) self.assertFalse(form.is_valid()) def test_forms_patient_phone_is_not_valid_MAX(self):", "'sex': self.sex_valid, 
'date_of_birth': self.date_of_birth_valid, 'CEP': self.CEP_valid, 'UF': self.UF_valid, 'city': self.city_valid, 'neighborhood': self.neighborhood_invalid_MAX, 'complement':", "test_forms_patient_password_is_not_valid_MAX(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid, 'email': self.email_valid, 'password': <PASSWORD>, 'confirm_password': <PASSWORD>,", "User() user.email = \"<EMAIL>\" user.save() def test_forms_patient_is_valid(self): form_data = {'name': self.name_valid, 'phone': self.phone_valid,", "from user.models import User class TestPatientForm(TestCase): def setUp(self): self.name_valid = '<NAME>' self.name_invalid =" ]
[ "sequence print('\\nTerminating program...') sleep(2) # Terminate sys.exit() else: # Notify the user of", "printing the application header and menu \"\"\" # Clear the console window os.system('cls')", "password list return passList def passParams(): \"\"\" () -> () Function that is", "(op == 1): print('\\n') # Call method that retrieves the password generation parameters", "pass length and how many to generate len = click.prompt('How long would you", "\"\"\" # Clear the console window os.system('cls') # Print the application header &", "= click.prompt('\\n\\nWould you like to generate another set? (y/n) >>', type=str) # Execute", "user. \"\"\" # Prompt the user for their desired pass length and how", "'N' or choice == 'n'): # Notify the user of navigation back to", "+ \"\\033[0m\\033[1m[1] Generate Password(s)\\n\" + \"[2] Exit Program\\n\") def generator(len, num): \"\"\" (int,", "the user as to whether or not they'd like to generate another set", "the password generation parameters passParams() if (choice == 'N' or choice == 'n'):", "main menu and prompt the user for input displayHeader() genLogic() if (op ==", "1 # Return the password list return passList def passParams(): \"\"\" () ->", "break # Display the main menu and prompt the user for input displayHeader()", "re-prompt them for input print('\\033[91mInvalid command, please try again!\\033[0m') genLogic() ####### MAIN PROGRAM", "FUNCTIONS ###### def displayHeader(): \"\"\" () -> () Function that is responsible for", "variable value to the passList passList.append(temp) # Increment the counter i += 1", "choice == 'y'): print('\\n') # Call the function that retrieves the password generation", "many password(s) would you like to generate? >>', type=int) print('\\n') # Assemble the", "and prompt the user for input displayHeader() genLogic() if (op == 2): #", "passwords passList = [] # Initialize a counter variable to assist with generation", "the user. 
\"\"\" # Prompt the user for their desired pass length and", "command error and re-prompt them for input print('\\033[91mInvalid command, please try again!\\033[0m') genLogic()", "genLogic() if (op == 2): # Notify the user of the termination sequence", ">>', type=int) print('\\n') # Assemble the password list passwordList = generator(len, num) #", "-> () Function that is responsible for printing the application header and menu", "print('\\n') # Assemble the password list passwordList = generator(len, num) # Print the", "Password Generator v1.0\\n\" + \"\\033[92mSource Code By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\" + \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" +", "length and how many to generate len = click.prompt('How long would you like", "Notify the user of the termination sequence print('\\nTerminating program...') sleep(2) # Terminate sys.exit()", "user's choice \"\"\" # Prompt the user for input op = click.prompt('Enter choice", "that is responsible for retrieving the desired password generation paramters of the user.", "Code By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\" + \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1] Generate Password(s)\\n\" + \"[2]", "you like your password(s) to be? 
>>', type=int) num = click.prompt('How many password(s)", "Print the password list to the console print(*passwordList, sep='\\n') def genLogic(): \"\"\" ()", "sleep ####### FUNCTIONS ###### def displayHeader(): \"\"\" () -> () Function that is", "num: # Assemble the password temp = ''.join(random.choices(string.ascii_lowercase + string.digits, k = len))", "passList passList.append(temp) # Increment the counter i += 1 # Return the password", "passParams() while(True): # Prompt the user as to whether or not they'd like", "counter i += 1 # Return the password list return passList def passParams():", "-> list Function that is repsonsible for generating a random alphanumeric password based", "'Y' or choice == 'y'): print('\\n') # Call the function that retrieves the", "# Display the main menu and prompt the user for input displayHeader() genLogic()", "repsonsible for generating a random alphanumeric password based off the iser request parameters", "# Initialize the list that will hold the generated passwords passList = []", "a random alphanumeric password based off the iser request parameters \"\"\" # Initialize", "of the user. 
\"\"\" # Prompt the user for their desired pass length", "(int, int) -> list Function that is repsonsible for generating a random alphanumeric", "user of navigation back to the main menu print('Returning you to the main", "termination sequence print('\\nTerminating program...') sleep(2) # Terminate sys.exit() else: # Notify the user", "REQUIRED IMPORTS ####### import os import sys import click import random import string", "not they'd like to generate another set choice = click.prompt('\\n\\nWould you like to", "def passParams(): \"\"\" () -> () Function that is responsible for retrieving the", "on the user's choice \"\"\" # Prompt the user for input op =", "menu \"\"\" # Clear the console window os.system('cls') # Print the application header", "temp variable value to the passList passList.append(temp) # Increment the counter i +=", "== 1): print('\\n') # Call method that retrieves the password generation parameters passParams()", "\\033[0m\\033[1m<NAME> (CoderMP)\\n\" + \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1] Generate Password(s)\\n\" + \"[2] Exit Program\\n\")", "generating a random alphanumeric password based off the iser request parameters \"\"\" #", "# Notify the user of their command error and re-prompt them for input", "the main menu and prompt the user for input displayHeader() genLogic() if (op", "2): # Notify the user of the termination sequence print('\\nTerminating program...') sleep(2) #", "###### def displayHeader(): \"\"\" () -> () Function that is responsible for printing", "# Notify the user of navigation back to the main menu print('Returning you", "choice == 'n'): # Notify the user of navigation back to the main", "(op == 2): # Notify the user of the termination sequence print('\\nTerminating program...')", "print(*passwordList, sep='\\n') def genLogic(): \"\"\" () -> () Function that is responsible for", "if (choice == 'Y' or choice == 'y'): print('\\n') # Call the function", "Function that is responsible for 
retrieving the desired password generation paramters of the", "many to generate len = click.prompt('How long would you like your password(s) to", "the password list passwordList = generator(len, num) # Print the password list to", "list Function that is repsonsible for generating a random alphanumeric password based off", "accordingly if (choice == 'Y' or choice == 'y'): print('\\n') # Call the", "# Print the password list to the console print(*passwordList, sep='\\n') def genLogic(): \"\"\"", "len)) # Append the temp variable value to the passList passList.append(temp) # Increment", "<reponame>CoderMP/PythonPassGen<gh_stars>0 ####### REQUIRED IMPORTS ####### import os import sys import click import random", "Clear the console window os.system('cls') # Print the application header & menu print(\"\\033[94m------------------------------\\n\"", "off the iser request parameters \"\"\" # Initialize the list that will hold", "####### FUNCTIONS ###### def displayHeader(): \"\"\" () -> () Function that is responsible", "another set? (y/n) >>', type=str) # Execute accordingly if (choice == 'Y' or", "to be? >>', type=int) num = click.prompt('How many password(s) would you like to", "num): \"\"\" (int, int) -> list Function that is repsonsible for generating a", "() -> () Function that is responsible for printing the application header and", "click.prompt('How long would you like your password(s) to be? 
>>', type=int) num =", "request parameters \"\"\" # Initialize the list that will hold the generated passwords", "sleep(2) # Terminate sys.exit() else: # Notify the user of their command error", "+ \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1] Generate Password(s)\\n\" + \"[2] Exit Program\\n\") def generator(len,", "the application logic based on the user's choice \"\"\" # Prompt the user", "\"\"\" # Prompt the user for input op = click.prompt('Enter choice >>', type=int)", "method that retrieves the password generation parameters passParams() while(True): # Prompt the user", "the user for their desired pass length and how many to generate len", "Function that is repsonsible for generating a random alphanumeric password based off the", "\"\\033[0m\\033[1m[1] Generate Password(s)\\n\" + \"[2] Exit Program\\n\") def generator(len, num): \"\"\" (int, int)", "responsible for retrieving the desired password generation paramters of the user. \"\"\" #", "click.prompt('\\n\\nWould you like to generate another set? 
(y/n) >>', type=str) # Execute accordingly", "Generator \\033[94m||\\n\" + \"------------------------------\\n\\n\" + \"\\033[0mWelcome to Password Generator v1.0\\n\" + \"\\033[92mSource Code", "sys.exit() else: # Notify the user of their command error and re-prompt them", "click import random import string from time import sleep ####### FUNCTIONS ###### def", "is repsonsible for generating a random alphanumeric password based off the iser request", "\"|| \\033[92mPassword Generator \\033[94m||\\n\" + \"------------------------------\\n\\n\" + \"\\033[0mWelcome to Password Generator v1.0\\n\" +", "Initialize the list that will hold the generated passwords passList = [] #", "password temp = ''.join(random.choices(string.ascii_lowercase + string.digits, k = len)) # Append the temp", "Display the main menu and prompt the user for input displayHeader() genLogic() if", "== 'N' or choice == 'n'): # Notify the user of navigation back", "generated passwords passList = [] # Initialize a counter variable to assist with", "responsible for executing the application logic based on the user's choice \"\"\" #", "\"\"\" # Prompt the user for their desired pass length and how many", "== 2): # Notify the user of the termination sequence print('\\nTerminating program...') sleep(2)", "long would you like your password(s) to be? >>', type=int) num = click.prompt('How", "the list that will hold the generated passwords passList = [] # Initialize", "generate? >>', type=int) print('\\n') # Assemble the password list passwordList = generator(len, num)", "type=int) num = click.prompt('How many password(s) would you like to generate? >>', type=int)", "num = click.prompt('How many password(s) would you like to generate? >>', type=int) print('\\n')", "Prompt the user for input op = click.prompt('Enter choice >>', type=int) if (op", "like to generate another set? 
(y/n) >>', type=str) # Execute accordingly if (choice", "for printing the application header and menu \"\"\" # Clear the console window", "import string from time import sleep ####### FUNCTIONS ###### def displayHeader(): \"\"\" ()", "if (op == 1): print('\\n') # Call method that retrieves the password generation", "(CoderMP)\\n\" + \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1] Generate Password(s)\\n\" + \"[2] Exit Program\\n\") def", "num) # Print the password list to the console print(*passwordList, sep='\\n') def genLogic():", "Increment the counter i += 1 # Return the password list return passList", "# Terminate sys.exit() else: # Notify the user of their command error and", "print('\\033[91mInvalid command, please try again!\\033[0m') genLogic() ####### MAIN PROGRAM ####### if __name__ ==", "from time import sleep ####### FUNCTIONS ###### def displayHeader(): \"\"\" () -> ()", "they'd like to generate another set choice = click.prompt('\\n\\nWould you like to generate", "the passList passList.append(temp) # Increment the counter i += 1 # Return the", "with generation i = 0 while i < num: # Assemble the password", "Generate Password(s)\\n\" + \"[2] Exit Program\\n\") def generator(len, num): \"\"\" (int, int) ->", "window os.system('cls') # Print the application header & menu print(\"\\033[94m------------------------------\\n\" + \"|| \\033[92mPassword", "Return the password list return passList def passParams(): \"\"\" () -> () Function", "based on the user's choice \"\"\" # Prompt the user for input op", "Assemble the password list passwordList = generator(len, num) # Print the password list", "\"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1] Generate Password(s)\\n\" + \"[2] Exit Program\\n\") def generator(len, num):", "By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\" + \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1] Generate Password(s)\\n\" + \"[2] Exit", "parameters passParams() if 
(choice == 'N' or choice == 'n'): # Notify the", "() Function that is responsible for retrieving the desired password generation paramters of", "if (op == 2): # Notify the user of the termination sequence print('\\nTerminating", "user of the termination sequence print('\\nTerminating program...') sleep(2) # Terminate sys.exit() else: #", "set? (y/n) >>', type=str) # Execute accordingly if (choice == 'Y' or choice", "= click.prompt('How many password(s) would you like to generate? >>', type=int) print('\\n') #", "# Call the function that retrieves the password generation parameters passParams() if (choice", "as to whether or not they'd like to generate another set choice =", "sleep(1.3) os.system('cls') break # Display the main menu and prompt the user for", "error and re-prompt them for input print('\\033[91mInvalid command, please try again!\\033[0m') genLogic() #######", "the application header & menu print(\"\\033[94m------------------------------\\n\" + \"|| \\033[92mPassword Generator \\033[94m||\\n\" + \"------------------------------\\n\\n\"", "+ \"|| \\033[92mPassword Generator \\033[94m||\\n\" + \"------------------------------\\n\\n\" + \"\\033[0mWelcome to Password Generator v1.0\\n\"", "and how many to generate len = click.prompt('How long would you like your", "Function that is responsible for printing the application header and menu \"\"\" #", "console print(*passwordList, sep='\\n') def genLogic(): \"\"\" () -> () Function that is responsible", "else: # Notify the user of their command error and re-prompt them for", "menu print('Returning you to the main menu....') sleep(1.3) os.system('cls') break # Display the", "+ string.digits, k = len)) # Append the temp variable value to the", "set choice = click.prompt('\\n\\nWould you like to generate another set? 
(y/n) >>', type=str)", "will hold the generated passwords passList = [] # Initialize a counter variable", "-> () Function that is responsible for executing the application logic based on", "choice >>', type=int) if (op == 1): print('\\n') # Call method that retrieves", "generation paramters of the user. \"\"\" # Prompt the user for their desired", "input op = click.prompt('Enter choice >>', type=int) if (op == 1): print('\\n') #", "paramters of the user. \"\"\" # Prompt the user for their desired pass", "# Prompt the user for input op = click.prompt('Enter choice >>', type=int) if", "menu....') sleep(1.3) os.system('cls') break # Display the main menu and prompt the user", "Initialize a counter variable to assist with generation i = 0 while i", "you like to generate? >>', type=int) print('\\n') # Assemble the password list passwordList", "# Append the temp variable value to the passList passList.append(temp) # Increment the", "console window os.system('cls') # Print the application header & menu print(\"\\033[94m------------------------------\\n\" + \"||", "to the console print(*passwordList, sep='\\n') def genLogic(): \"\"\" () -> () Function that", "password generation paramters of the user. \"\"\" # Prompt the user for their", "def generator(len, num): \"\"\" (int, int) -> list Function that is repsonsible for", "-> () Function that is responsible for retrieving the desired password generation paramters", "import random import string from time import sleep ####### FUNCTIONS ###### def displayHeader():", "temp = ''.join(random.choices(string.ascii_lowercase + string.digits, k = len)) # Append the temp variable", "+ \"------------------------------\\n\\n\" + \"\\033[0mWelcome to Password Generator v1.0\\n\" + \"\\033[92mSource Code By: \\033[0m\\033[1m<NAME>", "like to generate? 
>>', type=int) print('\\n') # Assemble the password list passwordList =", "the counter i += 1 # Return the password list return passList def", "of navigation back to the main menu print('Returning you to the main menu....')", "your password(s) to be? >>', type=int) num = click.prompt('How many password(s) would you", "iser request parameters \"\"\" # Initialize the list that will hold the generated", "+ \"\\033[0mWelcome to Password Generator v1.0\\n\" + \"\\033[92mSource Code By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\" +", "+ \"\\033[92mSource Code By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\" + \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1] Generate Password(s)\\n\"", "Terminate sys.exit() else: # Notify the user of their command error and re-prompt", "[] # Initialize a counter variable to assist with generation i = 0", "that is responsible for printing the application header and menu \"\"\" # Clear", "() -> () Function that is responsible for executing the application logic based", "\"\"\" (int, int) -> list Function that is repsonsible for generating a random", "would you like to generate? 
>>', type=int) print('\\n') # Assemble the password list", "passwordList = generator(len, num) # Print the password list to the console print(*passwordList,", "and re-prompt them for input print('\\033[91mInvalid command, please try again!\\033[0m') genLogic() ####### MAIN", "\\033[92mPassword Generator \\033[94m||\\n\" + \"------------------------------\\n\\n\" + \"\\033[0mWelcome to Password Generator v1.0\\n\" + \"\\033[92mSource", "# Assemble the password temp = ''.join(random.choices(string.ascii_lowercase + string.digits, k = len)) #", "the function that retrieves the password generation parameters passParams() if (choice == 'N'", "generation i = 0 while i < num: # Assemble the password temp", "sep='\\n') def genLogic(): \"\"\" () -> () Function that is responsible for executing", "generation parameters passParams() while(True): # Prompt the user as to whether or not", "that is repsonsible for generating a random alphanumeric password based off the iser", "displayHeader(): \"\"\" () -> () Function that is responsible for printing the application", "back to the main menu print('Returning you to the main menu....') sleep(1.3) os.system('cls')", "the generated passwords passList = [] # Initialize a counter variable to assist", "Program\\n\") def generator(len, num): \"\"\" (int, int) -> list Function that is repsonsible", "input print('\\033[91mInvalid command, please try again!\\033[0m') genLogic() ####### MAIN PROGRAM ####### if __name__", "# Call method that retrieves the password generation parameters passParams() while(True): # Prompt", "Prompt the user as to whether or not they'd like to generate another", "# Return the password list return passList def passParams(): \"\"\" () -> ()", "parameters \"\"\" # Initialize the list that will hold the generated passwords passList", "# Prompt the user as to whether or not they'd like to generate", "def displayHeader(): \"\"\" () -> () Function that is responsible for printing the", "+= 1 # Return the password list 
return passList def passParams(): \"\"\" ()", "sys import click import random import string from time import sleep ####### FUNCTIONS", "\"\"\" () -> () Function that is responsible for retrieving the desired password", "Password(s)\\n\" + \"[2] Exit Program\\n\") def generator(len, num): \"\"\" (int, int) -> list", "while i < num: # Assemble the password temp = ''.join(random.choices(string.ascii_lowercase + string.digits,", "i < num: # Assemble the password temp = ''.join(random.choices(string.ascii_lowercase + string.digits, k", "i = 0 while i < num: # Assemble the password temp =", "choice = click.prompt('\\n\\nWould you like to generate another set? (y/n) >>', type=str) #", "& menu print(\"\\033[94m------------------------------\\n\" + \"|| \\033[92mPassword Generator \\033[94m||\\n\" + \"------------------------------\\n\\n\" + \"\\033[0mWelcome to", "the user of the termination sequence print('\\nTerminating program...') sleep(2) # Terminate sys.exit() else:", "how many to generate len = click.prompt('How long would you like your password(s)", "the password temp = ''.join(random.choices(string.ascii_lowercase + string.digits, k = len)) # Append the", "import os import sys import click import random import string from time import", "value to the passList passList.append(temp) # Increment the counter i += 1 #", "####### REQUIRED IMPORTS ####### import os import sys import click import random import", "int) -> list Function that is repsonsible for generating a random alphanumeric password", "the password list return passList def passParams(): \"\"\" () -> () Function that", "like to generate another set choice = click.prompt('\\n\\nWould you like to generate another", "() -> () Function that is responsible for retrieving the desired password generation", "type=int) print('\\n') # Assemble the password list passwordList = generator(len, num) # Print", "the user of navigation back to the main menu print('Returning you to the", "Print the application header & menu 
print(\"\\033[94m------------------------------\\n\" + \"|| \\033[92mPassword Generator \\033[94m||\\n\" +", "is responsible for printing the application header and menu \"\"\" # Clear the", "+ \"[2] Exit Program\\n\") def generator(len, num): \"\"\" (int, int) -> list Function", "print('\\n') # Call the function that retrieves the password generation parameters passParams() if", "generator(len, num): \"\"\" (int, int) -> list Function that is repsonsible for generating", "application header and menu \"\"\" # Clear the console window os.system('cls') # Print", "to the main menu....') sleep(1.3) os.system('cls') break # Display the main menu and", "their command error and re-prompt them for input print('\\033[91mInvalid command, please try again!\\033[0m')", "variable to assist with generation i = 0 while i < num: #", "# Increment the counter i += 1 # Return the password list return", "is responsible for retrieving the desired password generation paramters of the user. \"\"\"", "= ''.join(random.choices(string.ascii_lowercase + string.digits, k = len)) # Append the temp variable value", "the termination sequence print('\\nTerminating program...') sleep(2) # Terminate sys.exit() else: # Notify the", "# Print the application header & menu print(\"\\033[94m------------------------------\\n\" + \"|| \\033[92mPassword Generator \\033[94m||\\n\"", "or choice == 'n'): # Notify the user of navigation back to the", "navigation back to the main menu print('Returning you to the main menu....') sleep(1.3)", "the user of their command error and re-prompt them for input print('\\033[91mInvalid command,", "op = click.prompt('Enter choice >>', type=int) if (op == 1): print('\\n') # Call", "Execute accordingly if (choice == 'Y' or choice == 'y'): print('\\n') # Call", "hold the generated passwords passList = [] # Initialize a counter variable to", "the iser request parameters \"\"\" # Initialize the list that will hold the", "# Prompt the user for their desired pass length and how 
many to", "\"\"\" () -> () Function that is responsible for executing the application logic", "desired pass length and how many to generate len = click.prompt('How long would", "os.system('cls') # Print the application header & menu print(\"\\033[94m------------------------------\\n\" + \"|| \\033[92mPassword Generator", "logic based on the user's choice \"\"\" # Prompt the user for input", "to generate another set? (y/n) >>', type=str) # Execute accordingly if (choice ==", "password generation parameters passParams() if (choice == 'N' or choice == 'n'): #", "password(s) to be? >>', type=int) num = click.prompt('How many password(s) would you like", "= 0 while i < num: # Assemble the password temp = ''.join(random.choices(string.ascii_lowercase", "the password list to the console print(*passwordList, sep='\\n') def genLogic(): \"\"\" () ->", "that retrieves the password generation parameters passParams() while(True): # Prompt the user as", "or choice == 'y'): print('\\n') # Call the function that retrieves the password", "for input displayHeader() genLogic() if (op == 2): # Notify the user of", "random alphanumeric password based off the iser request parameters \"\"\" # Initialize the", "to generate? >>', type=int) print('\\n') # Assemble the password list passwordList = generator(len,", "\"\\033[92mSource Code By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\" + \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1] Generate Password(s)\\n\" +", "program...') sleep(2) # Terminate sys.exit() else: # Notify the user of their command", "retrieving the desired password generation paramters of the user. \"\"\" # Prompt the", "passParams() if (choice == 'N' or choice == 'n'): # Notify the user", "of their command error and re-prompt them for input print('\\033[91mInvalid command, please try", "you to the main menu....') sleep(1.3) os.system('cls') break # Display the main menu", "len = click.prompt('How long would you like your password(s) to be? 
>>', type=int)", "= [] # Initialize a counter variable to assist with generation i =", "() Function that is responsible for executing the application logic based on the", "retrieves the password generation parameters passParams() while(True): # Prompt the user as to", "\"------------------------------\\n\\n\" + \"\\033[0mWelcome to Password Generator v1.0\\n\" + \"\\033[92mSource Code By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\"", "# Notify the user of the termination sequence print('\\nTerminating program...') sleep(2) # Terminate", "generate another set? (y/n) >>', type=str) # Execute accordingly if (choice == 'Y'", "passList.append(temp) # Increment the counter i += 1 # Return the password list", "input displayHeader() genLogic() if (op == 2): # Notify the user of the", "if (choice == 'N' or choice == 'n'): # Notify the user of", "based off the iser request parameters \"\"\" # Initialize the list that will", "passList def passParams(): \"\"\" () -> () Function that is responsible for retrieving", "while(True): # Prompt the user as to whether or not they'd like to", "is responsible for executing the application logic based on the user's choice \"\"\"", "list to the console print(*passwordList, sep='\\n') def genLogic(): \"\"\" () -> () Function", "Assemble the password temp = ''.join(random.choices(string.ascii_lowercase + string.digits, k = len)) # Append", "def genLogic(): \"\"\" () -> () Function that is responsible for executing the", "for input op = click.prompt('Enter choice >>', type=int) if (op == 1): print('\\n')", "passParams(): \"\"\" () -> () Function that is responsible for retrieving the desired", "user for input displayHeader() genLogic() if (op == 2): # Notify the user", "== 'n'): # Notify the user of navigation back to the main menu", "== 'y'): print('\\n') # Call the function that retrieves the password generation parameters", "user as to whether or not they'd like to generate another set choice", "the main menu....') sleep(1.3) os.system('cls') 
break # Display the main menu and prompt", "password generation parameters passParams() while(True): # Prompt the user as to whether or", "list return passList def passParams(): \"\"\" () -> () Function that is responsible", "0 while i < num: # Assemble the password temp = ''.join(random.choices(string.ascii_lowercase +", "import click import random import string from time import sleep ####### FUNCTIONS ######", "for executing the application logic based on the user's choice \"\"\" # Prompt", "user for input op = click.prompt('Enter choice >>', type=int) if (op == 1):", "'y'): print('\\n') # Call the function that retrieves the password generation parameters passParams()", "prompt the user for input displayHeader() genLogic() if (op == 2): # Notify", "them for input print('\\033[91mInvalid command, please try again!\\033[0m') genLogic() ####### MAIN PROGRAM #######", "function that retrieves the password generation parameters passParams() if (choice == 'N' or", "the user for input displayHeader() genLogic() if (op == 2): # Notify the", "to the passList passList.append(temp) # Increment the counter i += 1 # Return", "that is responsible for executing the application logic based on the user's choice", "genLogic(): \"\"\" () -> () Function that is responsible for executing the application", "string from time import sleep ####### FUNCTIONS ###### def displayHeader(): \"\"\" () ->", "menu and prompt the user for input displayHeader() genLogic() if (op == 2):", "v1.0\\n\" + \"\\033[92mSource Code By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\" + \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1] Generate", "to generate len = click.prompt('How long would you like your password(s) to be?", "password based off the iser request parameters \"\"\" # Initialize the list that", "to the main menu print('Returning you to the main menu....') sleep(1.3) os.system('cls') break", "Append the temp variable value to the passList passList.append(temp) # Increment the 
counter", "menu print(\"\\033[94m------------------------------\\n\" + \"|| \\033[92mPassword Generator \\033[94m||\\n\" + \"------------------------------\\n\\n\" + \"\\033[0mWelcome to Password", "Function that is responsible for executing the application logic based on the user's", "to assist with generation i = 0 while i < num: # Assemble", "Generator v1.0\\n\" + \"\\033[92mSource Code By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\" + \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1]", "assist with generation i = 0 while i < num: # Assemble the", "whether or not they'd like to generate another set choice = click.prompt('\\n\\nWould you", ">>', type=str) # Execute accordingly if (choice == 'Y' or choice == 'y'):", "generate another set choice = click.prompt('\\n\\nWould you like to generate another set? (y/n)", "print('\\nTerminating program...') sleep(2) # Terminate sys.exit() else: # Notify the user of their", "= generator(len, num) # Print the password list to the console print(*passwordList, sep='\\n')", "generation parameters passParams() if (choice == 'N' or choice == 'n'): # Notify", "user of their command error and re-prompt them for input print('\\033[91mInvalid command, please", "the user's choice \"\"\" # Prompt the user for input op = click.prompt('Enter", "type=str) # Execute accordingly if (choice == 'Y' or choice == 'y'): print('\\n')", "the temp variable value to the passList passList.append(temp) # Increment the counter i", "for their desired pass length and how many to generate len = click.prompt('How", "be? 
>>', type=int) num = click.prompt('How many password(s) would you like to generate?", "1): print('\\n') # Call method that retrieves the password generation parameters passParams() while(True):", "####### import os import sys import click import random import string from time", "the console print(*passwordList, sep='\\n') def genLogic(): \"\"\" () -> () Function that is", "main menu....') sleep(1.3) os.system('cls') break # Display the main menu and prompt the", "< num: # Assemble the password temp = ''.join(random.choices(string.ascii_lowercase + string.digits, k =", "displayHeader() genLogic() if (op == 2): # Notify the user of the termination", "that will hold the generated passwords passList = [] # Initialize a counter", "(choice == 'N' or choice == 'n'): # Notify the user of navigation", "k = len)) # Append the temp variable value to the passList passList.append(temp)", "'n'): # Notify the user of navigation back to the main menu print('Returning", "header & menu print(\"\\033[94m------------------------------\\n\" + \"|| \\033[92mPassword Generator \\033[94m||\\n\" + \"------------------------------\\n\\n\" + \"\\033[0mWelcome", "(choice == 'Y' or choice == 'y'): print('\\n') # Call the function that", "generate len = click.prompt('How long would you like your password(s) to be? 
>>',", "print(\"\\033[94m------------------------------\\n\" + \"|| \\033[92mPassword Generator \\033[94m||\\n\" + \"------------------------------\\n\\n\" + \"\\033[0mWelcome to Password Generator", "Notify the user of navigation back to the main menu print('Returning you to", "passList = [] # Initialize a counter variable to assist with generation i", "password list passwordList = generator(len, num) # Print the password list to the", "click.prompt('Enter choice >>', type=int) if (op == 1): print('\\n') # Call method that", "the main menu print('Returning you to the main menu....') sleep(1.3) os.system('cls') break #", "responsible for printing the application header and menu \"\"\" # Clear the console", "try again!\\033[0m') genLogic() ####### MAIN PROGRAM ####### if __name__ == '__main__': displayHeader() genLogic()", "list that will hold the generated passwords passList = [] # Initialize a", ">>', type=int) if (op == 1): print('\\n') # Call method that retrieves the", "os.system('cls') break # Display the main menu and prompt the user for input", "string.digits, k = len)) # Append the temp variable value to the passList", "\"\"\" # Initialize the list that will hold the generated passwords passList =", "= click.prompt('Enter choice >>', type=int) if (op == 1): print('\\n') # Call method", "for generating a random alphanumeric password based off the iser request parameters \"\"\"", "(y/n) >>', type=str) # Execute accordingly if (choice == 'Y' or choice ==", "counter variable to assist with generation i = 0 while i < num:", "generator(len, num) # Print the password list to the console print(*passwordList, sep='\\n') def", "header and menu \"\"\" # Clear the console window os.system('cls') # Print the", "desired password generation paramters of the user. 
\"\"\" # Prompt the user for", "main menu print('Returning you to the main menu....') sleep(1.3) os.system('cls') break # Display", "Notify the user of their command error and re-prompt them for input print('\\033[91mInvalid", "\"[2] Exit Program\\n\") def generator(len, num): \"\"\" (int, int) -> list Function that", "alphanumeric password based off the iser request parameters \"\"\" # Initialize the list", "click.prompt('How many password(s) would you like to generate? >>', type=int) print('\\n') # Assemble", "like your password(s) to be? >>', type=int) num = click.prompt('How many password(s) would", "\"\\033[0mWelcome to Password Generator v1.0\\n\" + \"\\033[92mSource Code By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\" + \"\\033[91mLicense:", "and menu \"\"\" # Clear the console window os.system('cls') # Print the application", "# Assemble the password list passwordList = generator(len, num) # Print the password", "user for their desired pass length and how many to generate len =", "you like to generate another set? (y/n) >>', type=str) # Execute accordingly if", "password(s) would you like to generate? 
>>', type=int) print('\\n') # Assemble the password", "Call the function that retrieves the password generation parameters passParams() if (choice ==", "Exit Program\\n\") def generator(len, num): \"\"\" (int, int) -> list Function that is", "that retrieves the password generation parameters passParams() if (choice == 'N' or choice", "the console window os.system('cls') # Print the application header & menu print(\"\\033[94m------------------------------\\n\" +", "to Password Generator v1.0\\n\" + \"\\033[92mSource Code By: \\033[0m\\033[1m<NAME> (CoderMP)\\n\" + \"\\033[91mLicense: \\033[0m\\033[1mMIT\\n\\n\"", "return passList def passParams(): \"\"\" () -> () Function that is responsible for", "application header & menu print(\"\\033[94m------------------------------\\n\" + \"|| \\033[92mPassword Generator \\033[94m||\\n\" + \"------------------------------\\n\\n\" +", "Prompt the user for their desired pass length and how many to generate", "another set choice = click.prompt('\\n\\nWould you like to generate another set? (y/n) >>',", "a counter variable to assist with generation i = 0 while i <", "type=int) if (op == 1): print('\\n') # Call method that retrieves the password", "''.join(random.choices(string.ascii_lowercase + string.digits, k = len)) # Append the temp variable value to", "application logic based on the user's choice \"\"\" # Prompt the user for", "for retrieving the desired password generation paramters of the user. 
\"\"\" # Prompt", "parameters passParams() while(True): # Prompt the user as to whether or not they'd", "# Clear the console window os.system('cls') # Print the application header & menu", "command, please try again!\\033[0m') genLogic() ####### MAIN PROGRAM ####### if __name__ == '__main__':", "import sleep ####### FUNCTIONS ###### def displayHeader(): \"\"\" () -> () Function that", "for input print('\\033[91mInvalid command, please try again!\\033[0m') genLogic() ####### MAIN PROGRAM ####### if", "to generate another set choice = click.prompt('\\n\\nWould you like to generate another set?", "the password generation parameters passParams() while(True): # Prompt the user as to whether", "would you like your password(s) to be? >>', type=int) num = click.prompt('How many", "print('\\n') # Call method that retrieves the password generation parameters passParams() while(True): #", "os import sys import click import random import string from time import sleep", "the application header and menu \"\"\" # Clear the console window os.system('cls') #", "list passwordList = generator(len, num) # Print the password list to the console", "= len)) # Append the temp variable value to the passList passList.append(temp) #", "to whether or not they'd like to generate another set choice = click.prompt('\\n\\nWould", "executing the application logic based on the user's choice \"\"\" # Prompt the", "random import string from time import sleep ####### FUNCTIONS ###### def displayHeader(): \"\"\"", "choice \"\"\" # Prompt the user for input op = click.prompt('Enter choice >>',", "\"\"\" () -> () Function that is responsible for printing the application header", "# Initialize a counter variable to assist with generation i = 0 while", "= click.prompt('How long would you like your password(s) to be? 
>>', type=int) num", "the user for input op = click.prompt('Enter choice >>', type=int) if (op ==", "Call method that retrieves the password generation parameters passParams() while(True): # Prompt the", ">>', type=int) num = click.prompt('How many password(s) would you like to generate? >>',", "# Execute accordingly if (choice == 'Y' or choice == 'y'): print('\\n') #", "print('Returning you to the main menu....') sleep(1.3) os.system('cls') break # Display the main", "i += 1 # Return the password list return passList def passParams(): \"\"\"", "or not they'd like to generate another set choice = click.prompt('\\n\\nWould you like", "import sys import click import random import string from time import sleep #######", "of the termination sequence print('\\nTerminating program...') sleep(2) # Terminate sys.exit() else: # Notify", "their desired pass length and how many to generate len = click.prompt('How long", "\\033[0m\\033[1mMIT\\n\\n\" + \"\\033[0m\\033[1m[1] Generate Password(s)\\n\" + \"[2] Exit Program\\n\") def generator(len, num): \"\"\"", "\\033[94m||\\n\" + \"------------------------------\\n\\n\" + \"\\033[0mWelcome to Password Generator v1.0\\n\" + \"\\033[92mSource Code By:", "IMPORTS ####### import os import sys import click import random import string from", "() Function that is responsible for printing the application header and menu \"\"\"", "password list to the console print(*passwordList, sep='\\n') def genLogic(): \"\"\" () -> ()", "please try again!\\033[0m') genLogic() ####### MAIN PROGRAM ####### if __name__ == '__main__': displayHeader()", "retrieves the password generation parameters passParams() if (choice == 'N' or choice ==", "== 'Y' or choice == 'y'): print('\\n') # Call the function that retrieves", "the desired password generation paramters of the user. \"\"\" # Prompt the user", "time import sleep ####### FUNCTIONS ###### def displayHeader(): \"\"\" () -> () Function" ]
[ "None backend = None context = None try: context = zmq.Context(1) # Socket", "backend != None: backend.close() if context != None: context.term() if __name__ == \"__main__\":", "python3 import os import sys import time import zmq def main(): frontend =", "time import zmq def main(): frontend = None backend = None context =", "backend = None context = None try: context = zmq.Context(1) # Socket facing", "= None backend = None context = None try: context = zmq.Context(1) #", "= None try: context = zmq.Context(1) # Socket facing clients frontend = context.socket(zmq.SUB)", "os import sys import time import zmq def main(): frontend = None backend", "None try: context = zmq.Context(1) # Socket facing clients frontend = context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\")", "pass if frontend != None: frontend.close() if backend != None: backend.close() if context", "frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE, b\"\") # Socket facing services backend = context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend,", "if backend != None: backend.close() if context != None: context.term() if __name__ ==", "import time import zmq def main(): frontend = None backend = None context", "print(\"bringing down zmq device\") finally: pass if frontend != None: frontend.close() if backend", "zmq device\") finally: pass if frontend != None: frontend.close() if backend != None:", "frontend = None backend = None context = None try: context = zmq.Context(1)", "e: print(e) print(\"bringing down zmq device\") finally: pass if frontend != None: frontend.close()", "!= None: frontend.close() if backend != None: backend.close() if context != None: context.term()", "None: frontend.close() if backend != None: backend.close() if context != None: context.term() if", "#!/usr/bin/env python3 import os import sys import time import zmq def main(): frontend", "frontend = context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") 
frontend.setsockopt(zmq.SUBSCRIBE, b\"\") # Socket facing services backend = context.socket(zmq.PUB)", "finally: pass if frontend != None: frontend.close() if backend != None: backend.close() if", "frontend.close() if backend != None: backend.close() if context != None: context.term() if __name__", "context = zmq.Context(1) # Socket facing clients frontend = context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE, b\"\")", "main(): frontend = None backend = None context = None try: context =", "device\") finally: pass if frontend != None: frontend.close() if backend != None: backend.close()", "zmq.Context(1) # Socket facing clients frontend = context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE, b\"\") # Socket", "# Socket facing services backend = context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend, backend) except Exception", "frontend.setsockopt(zmq.SUBSCRIBE, b\"\") # Socket facing services backend = context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend, backend)", "clients frontend = context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE, b\"\") # Socket facing services backend =", "services backend = context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend, backend) except Exception as e: print(e)", "frontend != None: frontend.close() if backend != None: backend.close() if context != None:", "!= None: backend.close() if context != None: context.term() if __name__ == \"__main__\": main()", "import sys import time import zmq def main(): frontend = None backend =", "= zmq.Context(1) # Socket facing clients frontend = context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE, b\"\") #", "# Socket facing clients frontend = context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE, b\"\") # 
Socket facing", "backend) except Exception as e: print(e) print(\"bringing down zmq device\") finally: pass if", "print(e) print(\"bringing down zmq device\") finally: pass if frontend != None: frontend.close() if", "try: context = zmq.Context(1) # Socket facing clients frontend = context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE,", "Socket facing clients frontend = context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE, b\"\") # Socket facing services", "def main(): frontend = None backend = None context = None try: context", "Socket facing services backend = context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend, backend) except Exception as", "backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend, backend) except Exception as e: print(e) print(\"bringing down zmq device\")", "down zmq device\") finally: pass if frontend != None: frontend.close() if backend !=", "= context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE, b\"\") # Socket facing services backend = context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\")", "Exception as e: print(e) print(\"bringing down zmq device\") finally: pass if frontend !=", "b\"\") # Socket facing services backend = context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend, backend) except", "context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE, b\"\") # Socket facing services backend = context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER,", "as e: print(e) print(\"bringing down zmq device\") finally: pass if frontend != None:", "import zmq def main(): frontend = None backend = None context = None", "<filename>device.py #!/usr/bin/env python3 import os import sys import time import zmq def main():", "except Exception as e: print(e) print(\"bringing down zmq device\") finally: pass 
if frontend", "context = None try: context = zmq.Context(1) # Socket facing clients frontend =", "backend = context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend, backend) except Exception as e: print(e) print(\"bringing", "= context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend, backend) except Exception as e: print(e) print(\"bringing down", "context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend, backend) except Exception as e: print(e) print(\"bringing down zmq", "zmq def main(): frontend = None backend = None context = None try:", "import os import sys import time import zmq def main(): frontend = None", "= None context = None try: context = zmq.Context(1) # Socket facing clients", "if frontend != None: frontend.close() if backend != None: backend.close() if context !=", "frontend, backend) except Exception as e: print(e) print(\"bringing down zmq device\") finally: pass", "None context = None try: context = zmq.Context(1) # Socket facing clients frontend", "facing services backend = context.socket(zmq.PUB) backend.bind(\"tcp://*:5560\") zmq.device(zmq.FORWARDER, frontend, backend) except Exception as e:", "sys import time import zmq def main(): frontend = None backend = None", "zmq.device(zmq.FORWARDER, frontend, backend) except Exception as e: print(e) print(\"bringing down zmq device\") finally:", "facing clients frontend = context.socket(zmq.SUB) frontend.bind(\"tcp://*:5559\") frontend.setsockopt(zmq.SUBSCRIBE, b\"\") # Socket facing services backend" ]
[ "flask import Blueprint # instantiating the blue print rbac_blueprint = Blueprint('rbac-service', __name__, url_prefix='/v1')", "\"\"\"Blueprint module\"\"\" from flask import Blueprint # instantiating the blue print rbac_blueprint =", "module\"\"\" from flask import Blueprint # instantiating the blue print rbac_blueprint = Blueprint('rbac-service',", "from flask import Blueprint # instantiating the blue print rbac_blueprint = Blueprint('rbac-service', __name__,", "<reponame>Nardri/rbac-service \"\"\"Blueprint module\"\"\" from flask import Blueprint # instantiating the blue print rbac_blueprint" ]
[ "24) + first_link_uid for x in range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids = np.array([((x + 1)", "#image_id*x for paralel dataset generation, otherwise 0 def init_data(self): #COCO \"\"\" Initialize data", "train!\") raise SystemExit(0) # initialize pybullet env env = generator.get_env() first_link_uid = env.robot.robot_uid", "# clear data and continue data[\"images\"].clear() data[\"annotations\"].clear() class GeneratorDope: #DOPE def __init__(self): self.object_settings", "#prepare data strucuture data, name = generator.data_struct_image() for object_uid in obj_ids: #loop through", "data \"\"\" data_train = dict( images=[# file_name, height, width, id ], type='instances', annotations=[#", "cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img) data[t] = img print(\"Image {}/{}\".format(t, steps)) self.env.close() # main if", "\"img_{}.png\".format(name)), img) data[t] = img print(\"Image {}/{}\".format(t, steps)) self.env.close() # main if __name__", "generator.store_image_info() if config['make_dataset'] in [\"new\", \"resume\"]: cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) if done:", "(in the image) #prepare data strucuture data, name = generator.data_struct_image() for object_uid in", "img = cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize)) cv2.imshow(\"image\", img) cv2.waitKey(1) padding = 6 - len(str(t+7999))", "boxc = list(boxc) projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id)) for point", "= GeneratorVae() else: raise Exception(\"dataset_type in config: use one of 'coco', 'dope', 'vae'!\")", "in data[\"objects\"][-1][\"projected_cuboid\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [0,255,0], -1) for projected_cuboid_point in", "'vae': generator = GeneratorVae() else: raise Exception(\"dataset_type in config: 
use one of 'coco',", "def _segmentationToPoly(mask, ): \"\"\" Convert segmentation from RLE to polynoms ([[x1 y1 x2", "f: json.dump(self.camera, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4)", "import sys import random from pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult from pycocotools import mask", "np.array([((x + 1) << 24) + first_link_uid for x in range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids", "data_train, data_test def resume(self): #DOPE try: files_test = [int(x.replace('.jpg','')) for x in os.listdir(config['output_test_folder'])", "visible representation assert(area > config['min_obj_area']) assert(len(seg)>0 and len(seg[0])>0) except: #make inverse map id->name", "files: os.remove(f) files = glob.glob(os.path.join(config['output_train_folder'],'./*')) for f in files: os.remove(f) data_train, data_test =", "str(camera_id), intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size, )) if config['make_dataset'] in [\"new\", \"resume\"]: filename = \"_camera_settings\" +", "can happen for small numbers try: with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f: data_train =", ") return data_train, data_test def create_3D_box(env_object,objdim): #DOPE objpos = env_object.get_position() objorient = env_object.get_orientation()", "[], \"exported_objects\": []} def get_env(self): #DOPE env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on", "with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4) class GeneratorVae: \"\"\" Generator", "= observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids = [x for x in np.unique(img_mask)] #identify objects(links) in the", "in the camera view (in the image) #prepare data strucuture data, name =", "= [random.uniform(1,2) for 
x in range(6)] #action = [2,2,2,2,2,2] self.env.robot.reset_random(action) # send the", "get_env(self): #COCO \"\"\" Create environment for COCO dataset generation according to dataset config", "path = config['output_test_folder'] if isTestSample == True else config['output_train_folder'] #get dataset image and", "image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])),", "with open(os.path.join(folder,json_name), 'w') as f: json.dump(json_dict, f, indent=4) # clear data and continue", "file for saving \"\"\" data = data_test if isTestSample == True else data_train", "self.env.render() action = [random.uniform(1,2) for x in range(6)] #action = [2,2,2,2,2,2] self.env.robot.reset_random(action) #", "__init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} def get_env(self): #DOPE env = RandomizedEnvWrapper(env=gym.make(config['env_name'],", "\"projected_3DBB_centroid\": projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict) def visualize(self): #DOPE image = im for projected_cuboid_point in", "self.init_data() data = data_test if isTestSample == True else data_train name = '{}.jpg'.format(image_id)", "lineWidth = 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0, 0.99), lineWidth = 4, lifeTime =", "seen print('Too small object of class {} with area={} in img {}'.format(self.too_small_obj, self.data_dict['area'],", "in the training config file \"\"\" with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\") as read_file: clr", "0.99), lineWidth = 4, lifeTime 
= 1) def write_json_end(self): #DOPE self.camera = {\"camera_settings\":", "open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4) filename = \"_object_settings\" + '.json'", "range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id = image_id + 1 #unique isTestSample", "True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self): \"\"\"", "for VAE vision model training \"\"\" def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\":", "= config['camera_resolution'], dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return", "commentjson.dump(config, f) if config['dataset_type'] == 'vae': generator.get_env() dataset_pth = config['output_folder'] + '/train' generator.collect_data(config['num_episodes'])", "max(img_ids) +1 # +1 if last sample were test (thus not in here).", "#fix colors img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids = [x for x in np.unique(img_mask)] #identify", "= data_test if isTestSample == True else data_train name = 'img_{}_cam{}.jpg'.format(image_id,camera_id) return data,", "def resume(self): #COCO \"\"\" Resume COCO dataset generation Returns: :return data_train: (dict) Training", "def get_append_annotations(self): #COCO \"\"\" Make and append COCO annotations for each object in", "t in range(config['num_steps']): #loop through steps # randomize the movements of robots (using", "env_object_list[0] class_name = env_object.get_name() else: continue if class_name in config['used_class_names']: class_id = config['used_class_names'][class_name]", "\"\"\" data_train = dict( images=[# file_name, height, width, id ], type='instances', annotations=[# segmentation,", "for key, value in config['object_colors'].items(): 
new_value = [] for item in value: new_value.append(clr[item])", "\"\"\" Create list of dictionaries with category id-name pairs in MSCOCO format Returns:", "use one of 'coco', 'dope', 'vae'!\") #prepare directories config['output_test_folder'] = config['output_folder'] + '/test'", "{ \"class\": class_name, \"class_id\": class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\":", "if config['dataset_type'] == 'coco': generator = GeneratorCoco() elif config['dataset_type'] == 'dope': generator =", "= config['show_bounding_boxes_gui'], changing_light_gui = config['changing_light_gui'], shadows_on = config['shadows_on'], color_dict = config['color_dict'], object_sampling_area =", "for projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [255,0,0], -1) image", "dataset config file \"\"\" self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True,", "config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self): self.objdim = {}", "#only render at the steps/frames we use for dataset for c in range(np.count_nonzero(config['active_cameras'])):", "inverse map id->name (just to pretty print) inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj =", "== 'object_colors': color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\") #write config.json to output_folder with open(config['output_folder']+'/config_dataset.json', 'w') as", "f: json.dump(json_dict, f, indent=4) # clear data and continue data[\"images\"].clear() data[\"annotations\"].clear() class GeneratorDope:", "cuboid] 
projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB = [list(env.project_point_to_camera_image(point, camera_id)) for point in box3D]", "preceding dataset generation in COCO data structure :return data_test: (dist) Testing data from", "...]]). Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. Parameters: :param mask: (array) Bitmap mask :return segmentationPoly: (list)", "for flag in ['test','train']: if flag == 'train': folder = config['output_train_folder'] data =", "pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult from pycocotools import mask import pybullet as p from", "config['num_objects_range'], used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset =", "config['used_class_names'].items(): categories.append({\"id\": int(key), \"name\": str(value)}) return categories def _segmentationToPoly(mask, ): \"\"\" Convert segmentation", "data[\"objects\"][-1][\"projected_cuboid\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [0,255,0], -1) for projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]:", "name, episode, config['num_episodes'])) with open(os.path.join(path, filename), 'w') as f: json.dump(data, f, indent=4) def", "filename), 'w') as f: json.dump(data, f, indent=4) def get_append_annotations(self): #DOPE cuboid_with_centroid = env_object.get_bounding_box()", "\"\"\" data['images'].append(dict( file_name=name, height=im.shape[0], width=im.shape[1], id=image_id,)) def get_append_annotations(self): #COCO \"\"\" Make and append", "glob.glob(os.path.join(config['output_test_folder'],'./*')) for f in files: os.remove(f) files = glob.glob(os.path.join(config['output_train_folder'],'./*')) for f in files:", "= False try: #notify and skip the object with too small visible representation", 
"COCO dataset annotations Returns: :return data_train: (dict) Data structure for training data :return", "argument. Loading default config: {}'.format(CONFIG_DEFAULT)) else: config_path = pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with open(config_path) as", "([[x1 y1 x2 x2 y2 ...]]). Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. Parameters: :param mask: (array)", "in obj_ids: #loop through kuka and used objects in the image (in the", "img print(\"Image {}/{}\".format(t, steps)) self.env.close() # main if __name__ == \"__main__\": if len(sys.argv)", "resume(self): #DOPE try: files_test = [int(x.replace('.jpg','')) for x in os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")] files_train", "image_id def data_struct_image(self): #COCO \"\"\" Assign name to COCO dataset image and train", "gripper links img_mask = np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge robot links obj_ids = [x", "dataset generation according to dataset config file Returns: :return env: (object) Environment for", "FileNotFoundError: image_id = 0 return self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self): #DOPE data_train, data_test = self.init_data()", "return segmentationPoly def create_coco_json(): #COCO \"\"\" Create COCO json data structure Returns: :return", "#make inverse map id->name (just to pretty print) inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj", "camera_resolution = config['camera_resolution'], dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81)", "from preceding dataset generation in COCO data structure :return data_test: (dist) Testing data", "else: env_object_list = list(filter(lambda object: object.uid == object_uid, env_objects)) if len(env_object_list) > 0:", "config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( 
name=\"camera\" + str(camera_id), intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size, )) if config['make_dataset'] in [\"new\", \"resume\"]:", "robot_uids[0]: class_name = config['robot'] elif object_uid == gripper_uids[0]: class_name = env.robot.get_name() else: env_object_list", "# if episode == 0: # generator.episode_zero() if episode % config['num_episodes_hard_reset'] == 0:", "self.env.robot.reset_random(action) # send the Kuka arms up observation, reward, done, info = self.env.step(action)", "generator.init_data() generator.episode_zero() elif config['make_dataset'] == 'resume': #resume print(\"Restoring dataset generation\") data_test, data_train, image_id", "format in str #2 or poly segmentation format bitmap = mask.decode(seg) seg =", "as f: json.dump(self.object_settings, f, indent=4) class GeneratorVae: \"\"\" Generator class for image dataset", "projected_cuboid_point)), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0], -1) image", "from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) except FileNotFoundError: image_id = 0 return", "if class_name in config['used_class_names']: class_id = config['used_class_names'][class_name] generator.get_append_annotations() #annotate and append annotations if", "for COCO image dataset for YOLACT vision model training \"\"\" def __init__(self): pass", "coordinates for COCO annotated object \"\"\" mask = img_mask==object_uid mask = np.expand_dims(mask, axis=2)", "else: folder = config['output_test_folder'] data = data_test json_name = 'img_{}_cam{}.json'.format(image_id, camera_id) json_dict =", "\"\"\" self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on = config['gui_on'],", "view (in the image) img_mask = np.where(np.isin(img_mask,gripper_uids), 
gripper_uids[0], img_mask) #merge gripper links img_mask", "{}'.format(CONFIG_DEFAULT)) else: config_path = pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with open(config_path) as file: config = commentjson.load(file)", "render_on = True, gui_on = config['gui_on'], show_bounding_boxes_gui = config['show_bounding_boxes_gui'], changing_light_gui = config['changing_light_gui'], shadows_on", "dataset generation Returns: :return data_train: (dict) Training data from preceding dataset generation in", "area={} in img {}'.format(self.too_small_obj, self.data_dict['area'], name)) def visualize(self): #COCO \"\"\" Visualize mask and", "config['camera_resolution'], dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) def collect_data(self,", "the Kuka arms up observation, reward, done, info = self.env.step(action) img = observation['camera_data'][6]['image']", "observation = env.get_observation() env_objects = observation[\"objects\"] for t in range(config['num_steps']): #loop through steps", "img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids = [x for x in np.unique(img_mask)] #identify objects(links) in", "x in range(6)] #action = [2,2,2,2,2,2] self.env.robot.reset_random(action) # send the Kuka arms up", "gripper_uids = np.array([((x + 1) << 24) + first_link_uid for x in range(env.robot.gripper_index,", "for point in cuboid] projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB = [list(env.project_point_to_camera_image(point, camera_id)) for", "in the env, add colors config['used_class_names'] = dict([x[1:3] for x in config['used_class_names_quantity']]) used_objects", "the init position of robots #random_pos randomizes the init positions of objects #", "= {\"objects\":[]} data_test = {\"objects\":[]} return data_train, data_test def resume(self): #DOPE try: files_test", "format Returns: :return categories: (list) Categories in MSCOCO 
format \"\"\" categories = []", "main loop for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through episodes print(\"episode: {}/{}\".format(episode, config['num_episodes']))", "the init positions of objects # if episode == 0: # generator.episode_zero() if", "<= 1: config_path = CONFIG_DEFAULT print('No config.json passed in argument. Loading default config:", "random_pos=True, hard=False) observation = env.get_observation() env_objects = observation[\"objects\"] for t in range(config['num_steps']): #loop", "Assign name to COCO dataset image and train of test status Returns: :param", "= config['num_objects_range'], used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset", "visualize(self): #COCO \"\"\" Visualize mask and bounding box coordinates for COCO annotated object", "myGym import envs from matplotlib.pyplot import imshow, show import cv2 import numpy as", "objorient = env_object.get_orientation() #objdim = env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center class GeneratorCoco: #COCO", "\"class\": class_name, \"class_id\": class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\": bounding_box,", "for image dataset for VAE vision model training \"\"\" def __init__(self): self.object_settings =", "name = 'img_{}_cam{}.jpg'.format(image_id,camera_id) return data, name def store_image_info(self): #COCO \"\"\" Append COCO dataset", "+ 1) << 24) + first_link_uid for x in range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids =", "reward, done, info = env.step(action) if t == 
config['num_steps']-1 or t%config['make_shot_every_frame'] == 0:", "get_append_annotations(self): #COCO \"\"\" Make and append COCO annotations for each object in the", "objects to appear in the env, add colors config['used_class_names'] = dict([x[1:3] for x", "data_test = dict( images=[# file_name, height, width, id ], type='instances', annotations=[# segmentation, area,", "as specified in the training config file \"\"\" with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\") as", "dataset generation >0, otherwise 0 if config['make_dataset'] == \"new\": #cleanup files files =", "= {} for key, value in config['object_colors'].items(): new_value = [] for item in", "data_train = dict( images=[# file_name, height, width, id ], type='instances', annotations=[# segmentation, area,", "GeneratorVae: \"\"\" Generator class for image dataset for VAE vision model training \"\"\"", "# randomize the movements of robots (using joint control) action = env.action_space.sample() observation,", "\"resume\"]: print(\"Storing annotations.json at episode {} of {}.\".format(episode, config['num_episodes'])) for flag in ['test','train']:", "category_id, id ], categories = _category_coco_format(), ) data_test = dict( images=[# file_name, height,", "directory \"\"\" if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing annotations.json at episode {} of", "only supported format at the moment def get_env(self): \"\"\" Create environment for VAE", "== 0 or episode == config['num_episodes']-1: generator.write_json_end() data_train, data_test = generator.init_data() # end", "= True, gui_on = config['gui_on'], show_bounding_boxes_gui = config['show_bounding_boxes_gui'], changing_light_gui = config['changing_light_gui'], shadows_on =", "with suggested colors new_dict = {} for key, value in config['object_colors'].items(): new_value =", "for saving \"\"\" data = data_test if isTestSample == True else data_train name", "or at the end if episode % 
config['autosafe_episode'] == 0 or episode ==", "im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids = [x for", "Write json file with COCO annotations to output directory \"\"\" if config['make_dataset'] in", "env.get_observation() env_objects = observation[\"objects\"] for t in range(config['num_steps']): #loop through steps # randomize", "JSON annotations every n periods or at the end if episode % config['autosafe_episode']", "as an input argument CONFIG_DEFAULT = pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') # helper functions: def color_names_to_rgb():", "data_test: (dist) Data structure for testing data \"\"\" data_train, data_test = create_coco_json() return", "\"\"\" Append COCO dataset image info to corresponding data dict \"\"\" data['images'].append(dict( file_name=name,", "class GeneratorCoco: #COCO \"\"\" Generator class for COCO image dataset for YOLACT vision", "f: data_test = json.load(f) except: pass #happens when test JSON is empty, which", "env_object_list = list(filter(lambda object: object.uid == object_uid, env_objects)) if len(env_object_list) > 0: env_object", "for value, key in config['used_class_names'].items(): categories.append({\"id\": int(key), \"name\": str(value)}) return categories def _segmentationToPoly(mask,", "[\"new\", \"resume\"]: print(\"Storing annotations.json at episode {} of {}.\".format(episode, config['num_episodes'])) for flag in", "projected_3DBB = [list(env.project_point_to_camera_image(point, camera_id)) for point in box3D] if class_name not in self.object_settings[\"exported_object_classes\"]:", "0 if config['make_dataset'] == \"new\": #cleanup files files = glob.glob(os.path.join(config['output_test_folder'],'./*')) for f in", "name def store_image_info(self): #COCO \"\"\" Append COCO dataset image info to corresponding data", "to polynoms ([[x1 y1 x2 x2 y2 ...]]). 
Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. Parameters: :param", "obj_ids = [x for x in np.unique(img_mask)] #identify merged objects in the camera", "and {} at episode {} of {}.\".format(filename, name, episode, config['num_episodes'])) with open(os.path.join(path, filename),", "(config['make_dataset'] == \"display\"): data_train, data_test = generator.init_data() generator.episode_zero() # the main loop for", "cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, [255,255,0],", "#COCO \"\"\" Generator class for COCO image dataset for YOLACT vision model training", "if isTestSample == True else data_train name = 'img_{}_cam{}.jpg'.format(image_id,camera_id) return data, name def", "box3D, \"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict) def visualize(self): #DOPE image = im", "= 'img_{}_cam{}.json'.format(image_id, camera_id) json_dict = {\"images\": data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()} if len(data[\"images\"]) >", "self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self): #DOPE data_train, data_test = self.init_data() data = data_test if isTestSample", "structure :return data_test: (dist) Testing data from preceding dataset generation in COCO data", "steps): \"\"\" Collect data for VAE dataset Parameters: :param steps: (int) Number of", "arms up observation, reward, done, info = self.env.step(action) img = observation['camera_data'][6]['image'] imgs =", "write JSON annotations every n periods or at the end if episode %", 
"\"resume\"]: filename = \"_camera_settings\" + '.json' print(\"Storing {}.\".format(filename)) with open(os.path.join(config['output_test_folder'], filename), 'w') as", "key, value in config['object_colors'].items(): new_value = [] for item in value: new_value.append(clr[item]) new_dict[key]", "= [list(env.project_point_to_camera_image(point, camera_id)) for point in cuboid] projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB =", "\"name\": str(value)}) return categories def _segmentationToPoly(mask, ): \"\"\" Convert segmentation from RLE to", "in [\"new\", \"resume\"]: print(\"Storing annotations.json at episode {} of {}.\".format(episode, config['num_episodes'])) for flag", "#unique isTestSample = np.random.random_sample() < config['train_test_split_pct'] # bool, test/train data? path = config['output_test_folder']", "data_test def resume(self): #DOPE try: files_test = [int(x.replace('.jpg','')) for x in os.listdir(config['output_test_folder']) if", "set-up \"\"\" self.id_unique = 0 #image_id*x for paralel dataset generation, otherwise 0 def", "and its mask im = observation[\"camera_data\"][camera_id][\"image\"] im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors img_mask", "segmentationToCocoMask, segmentationToCocoResult from pycocotools import mask import pybullet as p from bbox import", "each object in the scene \"\"\" seg = segmentationToCocoMask(img_mask,object_uid) area = float(mask.area(seg)) bbox", "config['dataset_type'] == 'vae': generator = GeneratorVae() else: raise Exception(\"dataset_type in config: use one", "the object with too small visible representation assert(area > config['min_obj_area']) assert(len(seg)>0 and len(seg[0])>0)", "#DOPE self.camera = {\"camera_settings\": []} for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras", "first_link_uid for x in range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids = np.array([((x + 1) << 24)", "increase 
self.id_unique = len(data_test['annotations']) + len(data_train['annotations']) print(\"Resuming from image_id {} for episodes: {}\".format(image_id,", "def get_append_annotations(self): #DOPE cuboid_with_centroid = env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg = segmentationToCocoMask(img_mask, object_uid) seg['counts']", "Data structure for training data :return data_test: (dist) Data structure for testing data", "#loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id = image_id + 1 #unique isTestSample =", "Resume COCO dataset generation Returns: :return data_train: (dict) Training data from preceding dataset", "cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) if done: print(\"Episode finished after {} timesteps\".format(t+1)) break", "with COCO annotations to output directory \"\"\" if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing", "dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj = inv_map[class_id] self.data_dict = dict( id=self.id_unique, image_id=image_id, category_id=class_id, segmentation=seg, area=area,", "COCO json data structure Returns: :return data_train: (dict) Data structure for training data", "elif config['dataset_type'] == 'dope': generator = GeneratorDope() elif config['dataset_type'] == 'vae': generator =", "config['output_train_folder'] #get dataset image and its mask im = observation[\"camera_data\"][camera_id][\"image\"] im = cv2.cvtColor(im,", "'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc = create_3D_box(env_object,self.objdim[class_name]) box3D = [] for x in range(boxp.shape[0]):", "#COCO \"\"\" Write json file with COCO annotations to output directory \"\"\" if", "of episodes initiated during dataset generation \"\"\" data = np.zeros((steps, self.imsize, self.imsize, 3),", "= CONFIG_DEFAULT print('No config.json passed in 
argument. Loading default config: {}'.format(CONFIG_DEFAULT)) else: config_path", "model training \"\"\" def __init__(self): pass def get_env(self): #COCO \"\"\" Create environment for", "0.47), lineWidth = 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0, 0.99), lineWidth = 4, lifeTime", "env.step(action) if t == config['num_steps']-1 or t%config['make_shot_every_frame'] == 0: # we only use", "\"class\": class_name, \"segmentation_class_id\": class_id, \"cuboid_dimensions\": self.objdim[class_name] }) self.data_dict = { \"class\": class_name, \"class_id\":", "file_name=name, height=im.shape[0], width=im.shape[1], id=image_id,)) def get_append_annotations(self): #COCO \"\"\" Make and append COCO annotations", "data_train, data_test = self.init_data() data = data_test if isTestSample == True else data_train", "training data :return data_test: (dist) Data structure for testing data \"\"\" data_train =", "data_test def create_3D_box(env_object,objdim): #DOPE objpos = env_object.get_position() objorient = env_object.get_orientation() #objdim = env_object.get_cuboid_dimensions()", "== 0: # we only use frames from some steps env.render() #only render", "\"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\": bounding_box, \"cuboid\": cuboid, \"projected_cuboid\": projected_cuboid, \"box3D\":", "cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [0,255,0], -1) for projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]: image = cv2.circle(cv2.UMat(image),", "True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) def collect_data(self, steps): \"\"\" Collect", "to output directory \"\"\" if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing 
annotations.json at episode", "object: object.uid == object_uid, env_objects)) if len(env_object_list) > 0: env_object = env_object_list[0] class_name", "use frames from some steps env.render() #only render at the steps/frames we use", "append annotations if config['visualize']: #visualize generator.visualize() #store dataset image and info generator.store_image_info() if", "['test','train']: if flag == 'train': folder = config['output_train_folder'] data = data_train else: folder", "Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. Parameters: :param mask: (array) Bitmap mask :return segmentationPoly: (list) Segmentation", "\"resume\"]: print(\"Storing {} and {} at episode {} of {}.\".format(filename, name, episode, config['num_episodes']))", "#cleanup files files = glob.glob(os.path.join(config['output_test_folder'],'./*')) for f in files: os.remove(f) files = glob.glob(os.path.join(config['output_train_folder'],'./*'))", "self.id_unique = len(data_test['annotations']) + len(data_train['annotations']) print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes']))", "t == config['num_steps']-1 or t%config['make_shot_every_frame'] == 0: # we only use frames from", "'r') as f: data_test = json.load(f) except: pass #happens when test JSON is", "= cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize)) cv2.imshow(\"image\", img) cv2.waitKey(1) padding = 6 - len(str(t+7999)) name", "config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self): \"\"\" Initial espisode", "\"\"\" Generator class for COCO image dataset for YOLACT vision model training \"\"\"", "simulator in COCO or DOPE format. Used for vision training. 
import gym from", "open(config['output_folder']+'/config_dataset.json', 'w') as f: commentjson.dump(config, f) if config['dataset_type'] == 'vae': generator.get_env() dataset_pth =", "used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], dataset = True, ), config_path =", "new_value.append(clr[item]) new_dict[key] = new_value config['color_dict'] = new_dict def _category_coco_format(): #COCO \"\"\" Create list", "format at the moment def get_env(self): \"\"\" Create environment for VAE dataset generation", "else data_train name = '{}.jpg'.format(image_id) return data, name def store_image_info(self): #DOPE #write dataset", "#notify and skip the object with too small visible representation assert(area > config['min_obj_area'])", "\"\"\" Initial espisode set-up \"\"\" self.id_unique = 0 #image_id*x for paralel dataset generation,", "in ['test','train']: if flag == 'train': folder = config['output_train_folder'] data = data_train else:", "max(max(files_test),max(files_train)) print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) self.episode_zero() except FileNotFoundError: image_id", "object.uid == object_uid, env_objects)) if len(env_object_list) > 0: env_object = env_object_list[0] class_name =", "= config['changing_light_gui'], shadows_on = config['shadows_on'], color_dict = config['color_dict'], object_sampling_area = config['object_sampling_area'], num_objects_range =", "data_test = create_coco_json() return data_train, data_test def resume(self): #COCO \"\"\" Resume COCO dataset", "writing files image_id = 0 #for paralel dataset generation >0, otherwise 0 if", "'r') as f: data_train = json.load(f) # get the ID of last image", "espisode set-up \"\"\" self.id_unique = 0 #image_id*x for paralel dataset generation, otherwise 0", "#COCO \"\"\" Create environment for COCO dataset generation according to dataset config file", "= inv_map[class_id] self.data_dict = dict( 
id=self.id_unique, image_id=image_id, category_id=class_id, segmentation=seg, area=area, bbox=bbox, iscrowd=0, )", "as np import os import glob import json import commentjson import sys import", "folder = config['output_test_folder'] data = data_test json_name = 'img_{}_cam{}.json'.format(image_id, camera_id) json_dict = {\"images\":", "data_test = generator.init_data() generator.episode_zero() # the main loop for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']):", "config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self): self.objdim = {} while len(self.objdim.keys())", "render at the steps/frames we use for dataset for c in range(np.count_nonzero(config['active_cameras'])): #loop", "camera_id)) projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id)) for point in cuboid] projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id))", "config['autosafe_episode'] == 0 or episode == config['num_episodes']-1: generator.write_json_end() data_train, data_test = generator.init_data() #", "boxp,boxc = create_3D_box(env_object,self.objdim[class_name]) box3D = [] for x in range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc =", "\"\"\" Write json file with COCO annotations to output directory \"\"\" if config['make_dataset']", "config['show_bounding_boxes_gui'], changing_light_gui = config['changing_light_gui'], shadows_on = config['shadows_on'], color_dict = config['color_dict'], object_sampling_area = config['object_sampling_area'],", "(string) Name of image file for saving \"\"\" data = data_test if isTestSample", "_category_coco_format(): #COCO \"\"\" Create list of dictionaries with category id-name pairs in MSCOCO", "data structure :return data_test: (dist) Testing data from preceding dataset generation in COCO", "= GeneratorDope() elif config['dataset_type'] == 'vae': generator = GeneratorVae() else: 
raise Exception(\"dataset_type in", "otherwise 0 if config['make_dataset'] == \"new\": #cleanup files files = glob.glob(os.path.join(config['output_test_folder'],'./*')) for f", "self.env.reset(random_pos=True) self.env.render() action = [random.uniform(1,2) for x in range(6)] #action = [2,2,2,2,2,2] self.env.robot.reset_random(action)", "def create_coco_json(): #COCO \"\"\" Create COCO json data structure Returns: :return data_train: (dict)", "only use frames from some steps env.render() #only render at the steps/frames we", "realistically seen print('Too small object of class {} with area={} in img {}'.format(self.too_small_obj,", "colors new_dict = {} for key, value in config['object_colors'].items(): new_value = [] for", "-1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, [255,255,0], -1) print(class_name) cv2.imshow('image',image) cv2.waitKey(1000) self.draw_bounding_box_3D()", "str #2 or poly segmentation format bitmap = mask.decode(seg) seg = _segmentationToPoly(bitmap) self.too_small_obj", "testing data \"\"\" data_train = dict( images=[# file_name, height, width, id ], type='instances',", "#DOPE data_train, data_test = self.init_data() data = data_test if isTestSample == True else", "in cuboid] projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB = [list(env.project_point_to_camera_image(point, camera_id)) for point in", "initiated during dataset generation \"\"\" data = np.zeros((steps, self.imsize, self.imsize, 3), dtype='f') for", "x in config['used_class_names_quantity']]) used_objects = [] for x in config['used_class_names_quantity']: for _ in", "for dataset for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id =", "#2 or poly segmentation format bitmap = mask.decode(seg) seg = 
_segmentationToPoly(bitmap) self.too_small_obj =", "functions: def color_names_to_rgb(): \"\"\" Assign RGB colors to objects by name as specified", "config['make_dataset'] == \"new\": #cleanup files files = glob.glob(os.path.join(config['output_test_folder'],'./*')) for f in files: os.remove(f)", "MSCOCO format \"\"\" categories = [] for value, key in config['used_class_names'].items(): categories.append({\"id\": int(key),", "contour.flatten().tolist() if len(contour) > 4: segmentationPoly.append(contour) return segmentationPoly def create_coco_json(): #COCO \"\"\" Create", "config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing {} and {} at episode {} of {}.\".format(filename,", "env_object.get_position() objorient = env_object.get_orientation() #objdim = env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center class GeneratorCoco:", "config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\") #write config.json to output_folder with open(config['output_folder']+'/config_dataset.json', 'w') as f: commentjson.dump(config, f)", "#annotate and append annotations if config['visualize']: #visualize generator.visualize() #store dataset image and info", "image_id + 1 #unique isTestSample = np.random.random_sample() < config['train_test_split_pct'] # bool, test/train data?", "config, specify here or pass as an input argument CONFIG_DEFAULT = pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json')", "specify here or pass as an input argument CONFIG_DEFAULT = pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') #", "== \"display\"): data_train, data_test = generator.init_data() generator.episode_zero() # the main loop for episode", "config['make_dataset'] in [\"new\", \"resume\"]: filename = \"_camera_settings\" + '.json' print(\"Storing 
{}.\".format(filename)) with open(os.path.join(config['output_test_folder'],", "Data structure for testing data \"\"\" data_train = dict( images=[# file_name, height, width,", "env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False) observation = env.get_observation() env_objects = observation[\"objects\"] for t in", "class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\": bounding_box, \"cuboid\": cuboid, \"projected_cuboid\":", "RGB colors to objects by name as specified in the training config file", "COCO image dataset for YOLACT vision model training \"\"\" def __init__(self): pass def", "active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], dataset = True, ), config_path = config['output_folder']+'/config_dataset.json')", "import os import glob import json import commentjson import sys import random from", "for x in range(env.robot.gripper_index, env.robot.num_joints + 1)]) # check mode of writing files", "data_test: (dist) Testing data from preceding dataset generation in COCO data structure :return", "== gripper_uids[0]: class_name = env.robot.get_name() else: env_object_list = list(filter(lambda object: object.uid == object_uid,", "== 'coco': generator = GeneratorCoco() elif config['dataset_type'] == 'dope': generator = GeneratorDope() elif", "episode, config['num_episodes'])) with open(os.path.join(path, filename), 'w') as f: json.dump(data, f, indent=4) def get_append_annotations(self):", "for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id) captured_image_size", "os.remove(f) data_train, data_test = generator.init_data() generator.episode_zero() elif 
config['make_dataset'] == 'resume': #resume print(\"Restoring dataset", "= GeneratorCoco() elif config['dataset_type'] == 'dope': generator = GeneratorDope() elif config['dataset_type'] == 'vae':", "image) img_mask = np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask) #merge gripper links img_mask = np.where(np.isin(img_mask,robot_uids), robot_uids[0],", "according to dataset config file Returns: :return env: (object) Environment for dataset generation", "image (in the camera view) if object_uid == robot_uids[0]: class_name = config['robot'] elif", "open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as", "c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id = image_id + 1", "== True else data_train name = '{}.jpg'.format(image_id) return data, name def store_image_info(self): #DOPE", "= create_3D_box(env_object,self.objdim[class_name]) box3D = [] for x in range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc = list(boxc)", "t%config['make_shot_every_frame'] == 0: # we only use frames from some steps env.render() #only", "data['images'].append(dict( file_name=name, height=im.shape[0], width=im.shape[1], id=image_id,)) def get_append_annotations(self): #COCO \"\"\" Make and append COCO", "indent=4) filename = \"_object_settings\" + '.json' with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.object_settings,", "VAE vision model training \"\"\" def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []}", "try: #notify and skip the object with too small visible representation assert(area >", "#loop through steps # randomize the movements of robots (using joint control) action", "the image) #prepare data strucuture data, name = 
generator.data_struct_image() for object_uid in obj_ids:", "joint control) action = env.action_space.sample() observation, reward, done, info = env.step(action) if t", "\"exported_objects\": []} self.env = None self.imsize = config[\"imsize\"] # only supported format at", "np.random.random_sample() < config['train_test_split_pct'] # bool, test/train data? path = config['output_test_folder'] if isTestSample ==", "for point in box3D] if class_name not in self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({ \"class\": class_name,", "or t%config['make_shot_every_frame'] == 0: # we only use frames from some steps env.render()", "> config['min_obj_area']) assert(len(seg)>0 and len(seg[0])>0) except: #make inverse map id->name (just to pretty", "color_names_to_rgb(): \"\"\" Assign RGB colors to objects by name as specified in the", "for x in os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")] files_train = [int(x.replace('.jpg','')) for x in os.listdir(config['output_train_folder'])", "[int(cv2.IMWRITE_JPEG_QUALITY), 70]) if done: print(\"Episode finished after {} timesteps\".format(t+1)) break # write JSON", "}) self.data_dict = { \"class\": class_name, \"class_id\": class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid,", "[1,2,3], lineColorRGB=(0.31, 0.78, 0.47), lineWidth = 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0, 0.99), lineWidth", "print(\"Storing annotations.json at episode {} of {}.\".format(episode, config['num_episodes'])) for flag in ['test','train']: if", "list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id)) 
for point in cuboid] projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc,", "class_name not in self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({ \"class\": class_name, \"segmentation_class_id\": class_id, \"cuboid_dimensions\": self.objdim[class_name] })", "annotations for each object in the scene \"\"\" seg = segmentationToCocoMask(img_mask,object_uid) area =", "value in config['object_colors'].items(): new_value = [] for item in value: new_value.append(clr[item]) new_dict[key] =", "(self.imsize, self.imsize)) cv2.imshow(\"image\", img) cv2.waitKey(1) padding = 6 - len(str(t+7999)) name = padding", "{} timesteps\".format(t+1)) break # write JSON annotations every n periods or at the", "= observation[\"objects\"] for t in range(config['num_steps']): #loop through steps # randomize the movements", "config file Returns: :return env: (object) Environment for dataset generation \"\"\" env =", "in [\"new\", \"resume\"]: cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) if done: print(\"Episode finished after", "config['changing_light_gui'], shadows_on = config['shadows_on'], color_dict = config['color_dict'], object_sampling_area = config['object_sampling_area'], num_objects_range = config['num_objects_range'],", "for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through episodes print(\"episode: {}/{}\".format(episode, config['num_episodes'])) #env reset", "+ first_link_uid for x in range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids = np.array([((x + 1) <<", "as f: json.dump(data, f, indent=4) def get_append_annotations(self): #DOPE cuboid_with_centroid = env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8]", "= env.step(action) if t == config['num_steps']-1 or 
t%config['make_shot_every_frame'] == 0: # we only", "glob import json import commentjson import sys import random from pycocotools.cocostuffhelper import segmentationToCocoMask,", "def color_names_to_rgb(): \"\"\" Assign RGB colors to objects by name as specified in", "iscrowd, image_id, bbox, category_id, id ], categories = _category_coco_format(), ) return data_train, data_test", "img) data[t] = img print(\"Image {}/{}\".format(t, steps)) self.env.close() # main if __name__ ==", "objpos = env_object.get_position() objorient = env_object.get_orientation() #objdim = env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center", "config['dataset_type'] == 'dope': generator = GeneratorDope() elif config['dataset_type'] == 'vae': generator = GeneratorVae()", "\"\"\" categories = [] for value, key in config['used_class_names'].items(): categories.append({\"id\": int(key), \"name\": str(value)})", "dataset_pth = config['output_folder'] + '/train' generator.collect_data(config['num_episodes']) print(\"Dataset finished. 
Ready to train!\") raise SystemExit(0)", "not in self.objdim.keys(): self.objdim[obj.name] = obj.get_cuboid_dimensions() def init_data(self): #DOPE data_train = {\"objects\":[]} data_test", "config['gui_on'], show_bounding_boxes_gui = config['show_bounding_boxes_gui'], changing_light_gui = config['changing_light_gui'], shadows_on = config['shadows_on'], color_dict = config['color_dict'],", "list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB = [list(env.project_point_to_camera_image(point, camera_id)) for point in box3D] if class_name not", "__init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} self.env = None self.imsize = config[\"imsize\"]", "= [int(x.replace('.jpg','')) for x in os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")] files_train = [int(x.replace('.jpg','')) for x", "objects # if episode == 0: # generator.episode_zero() if episode % config['num_episodes_hard_reset'] ==", "img_mask) #merge robot links obj_ids = [x for x in np.unique(img_mask)] #identify merged", "captured_image_size = {\"width\": config['camera_resolution'][0], \"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( name=\"camera\" + str(camera_id), intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size, ))", "#json file with suggested colors new_dict = {} for key, value in config['object_colors'].items():", "{} for key, value in config['object_colors'].items(): new_value = [] for item in value:", "in config['used_class_names']: class_id = config['used_class_names'][class_name] generator.get_append_annotations() #annotate and append annotations if config['visualize']: #visualize", "f in files: os.remove(f) data_train, data_test = generator.init_data() generator.episode_zero() elif config['make_dataset'] == 'resume':", "reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False) observation = 
env.get_observation() env_objects = observation[\"objects\"] for t", "= config['color_dict'], object_sampling_area = config['object_sampling_area'], num_objects_range = config['num_objects_range'], used_objects = used_objects, active_cameras =", "+ 1)]) # check mode of writing files image_id = 0 #for paralel", "image and its mask im = observation[\"camera_data\"][camera_id][\"image\"] im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors", "'w') as f: json.dump(data, f, indent=4) def get_append_annotations(self): #DOPE cuboid_with_centroid = env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8]", "pass as an input argument CONFIG_DEFAULT = pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') # helper functions: def", "for contour in contours: contour = contour.flatten().tolist() if len(contour) > 4: segmentationPoly.append(contour) return", "except FileNotFoundError: image_id = 0 return data_test, data_train, image_id def data_struct_image(self): #COCO \"\"\"", "episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through episodes print(\"episode: {}/{}\".format(episode, config['num_episodes'])) #env reset #random_robot", "colors img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids = [x for x in np.unique(img_mask)] #identify objects(links)", "for testing data \"\"\" data_train = dict( images=[# file_name, height, width, id ],", "Kuka arms up observation, reward, done, info = self.env.step(action) img = observation['camera_data'][6]['image'] imgs", "= _category_coco_format(), ) return data_train, data_test def create_3D_box(env_object,objdim): #DOPE objpos = env_object.get_position() objorient", "last generated image in preceding dataset generation \"\"\" try: with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as", "#define objects to appear in the env, add colors config['used_class_names'] = 
dict([x[1:3] for", "config['output_train_folder'] = config['output_folder'] + '/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True) os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define objects to appear", "exist_ok=True) #define objects to appear in the env, add colors config['used_class_names'] = dict([x[1:3]", "show_bounding_boxes_gui = config['show_bounding_boxes_gui'], changing_light_gui = config['changing_light_gui'], shadows_on = config['shadows_on'], color_dict = config['color_dict'], object_sampling_area", "1 #unique isTestSample = np.random.random_sample() < config['train_test_split_pct'] # bool, test/train data? path =", "area, iscrowd, image_id, bbox, category_id, id ], categories = _category_coco_format(), ) return data_train,", "seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str #2 or poly segmentation format", "= new_value config['color_dict'] = new_dict def _category_coco_format(): #COCO \"\"\" Create list of dictionaries", "im = observation[\"camera_data\"][camera_id][\"image\"] im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids", "of robots (using joint control) action = env.action_space.sample() observation, reward, done, info =", "= str(seg['counts'], \"utf-8\") #utf-8 format in str #2 or poly segmentation format bitmap", "dataset image and its mask im = observation[\"camera_data\"][camera_id][\"image\"] im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix", "box3D] if class_name not in self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({ \"class\": class_name, \"segmentation_class_id\": class_id, \"cuboid_dimensions\":", "format. Used for vision training. 
import gym from myGym import envs from matplotlib.pyplot", "id ], categories = _category_coco_format(), ) data_test = dict( images=[# file_name, height, width,", "in COCO data structure :return data_test: (dist) Testing data from preceding dataset generation", "len(sys.argv) <= 1: config_path = CONFIG_DEFAULT print('No config.json passed in argument. Loading default", "in config['used_class_names_quantity']: for _ in range(x[0]): used_objects.append(x[1]) if config['color_dict'] == 'object_colors': color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\")", "config['shadows_on'], color_dict = config['color_dict'], object_sampling_area = config['object_sampling_area'], num_objects_range = config['num_objects_range'], used_objects = used_objects,", "= [x for x in np.unique(img_mask)] #identify objects(links) in the camera view (in", "id->name (just to pretty print) inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj = inv_map[class_id] self.data_dict", "img = observation['camera_data'][6]['image'] imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img = cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize)) cv2.imshow(\"image\",", "lineColorRGB=(0.0, 0.0, 0.99), lineWidth = 4, lifeTime = 1) def write_json_end(self): #DOPE self.camera", "[] for contour in contours: contour = contour.flatten().tolist() if len(contour) > 4: segmentationPoly.append(contour)", "\"\"\" Generator class for image dataset for VAE vision model training \"\"\" def", "\"\"\" contours, _ = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly = [] for contour in", "file_name, height, width, id ], type='instances', annotations=[# segmentation, area, iscrowd, image_id, bbox, category_id,", "for t in range(config['num_steps']): #loop through steps # randomize the movements of robots", "self.imsize, 3), dtype='f') for t in range(steps): self.env.reset(random_pos=True) 
self.env.render() action = [random.uniform(1,2) for", "str bbox = mask.toBbox(seg).flatten().tolist() bounding_box = {'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc =", "add colors config['used_class_names'] = dict([x[1:3] for x in config['used_class_names_quantity']]) used_objects = [] for", "import pkg_resources # config, specify here or pass as an input argument CONFIG_DEFAULT", "(int) ID of last generated image in preceding dataset generation \"\"\" try: with", "env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg = segmentationToCocoMask(img_mask, object_uid) seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format", "new_dict = {} for key, value in config['object_colors'].items(): new_value = [] for item", "return data_train, data_test def resume(self): #DOPE try: files_test = [int(x.replace('.jpg','')) for x in", "> 4: segmentationPoly.append(contour) return segmentationPoly def create_coco_json(): #COCO \"\"\" Create COCO json data", "img_mask = np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge robot links obj_ids = [x for x", "data['annotations'].append(self.data_dict) #append annotations self.id_unique +=1 else: #area too small to be realistically seen", "objects in the camera view (in the image) #prepare data strucuture data, name", "int(key), \"name\": str(value)}) return categories def _segmentationToPoly(mask, ): \"\"\" Convert segmentation from RLE", "{}.\".format(filename)) with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4) with open(os.path.join(config['output_train_folder'], filename),", "representation assert(area > config['min_obj_area']) assert(len(seg)>0 and len(seg[0])>0) except: #make inverse map id->name (just", "'coco': generator = GeneratorCoco() elif config['dataset_type'] == 'dope': generator = GeneratorDope() elif config['dataset_type']", "if episode % 
config['num_episodes_hard_reset'] == 0: #to prevent objects vanishing when GUI is", "at the end if episode % config['autosafe_episode'] == 0 or episode == config['num_episodes']-1:", "pairs in MSCOCO format Returns: :return categories: (list) Categories in MSCOCO format \"\"\"", "imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img = cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize)) cv2.imshow(\"image\", img) cv2.waitKey(1) padding", "of last generated image in preceding dataset generation \"\"\" try: with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r')", "'.json' print(\"Storing {}.\".format(filename)) with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4) with", "), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) def collect_data(self, steps): \"\"\" Collect data", "== 'vae': generator = GeneratorVae() else: raise Exception(\"dataset_type in config: use one of", "[data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0], -1) image", "= pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') # helper functions: def color_names_to_rgb(): \"\"\" Assign RGB colors to", "env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on = config['gui_on'], show_bounding_boxes_gui", "segmentation, area, iscrowd, image_id, bbox, category_id, id ], categories = _category_coco_format(), ) return", "numbers try: with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f: data_train = json.load(f) # get the", "create_3D_box(env_object,objdim): #DOPE objpos = env_object.get_position() objorient = 
env_object.get_orientation() #objdim = env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2])", "def draw_bounding_box_3D(self): #DOPE for points in range(7): #p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78, 0.47), lineWidth", "projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [0,255,0], -1) for projected_cuboid_point", "image_id, bbox, category_id, id ], categories = _category_coco_format(), ) return data_train, data_test def", "with too small visible representation assert(area > config['min_obj_area']) assert(len(seg)>0 and len(seg[0])>0) except: #make", "name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) if done: print(\"Episode finished after {} timesteps\".format(t+1)) break #", "= self.env.step(action) img = observation['camera_data'][6]['image'] imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img = cv2.resize(imgs[0:450,100:500], (self.imsize,", "assert(len(seg)>0 and len(seg[0])>0) except: #make inverse map id->name (just to pretty print) inv_map", "info generator.store_image_info() if config['make_dataset'] in [\"new\", \"resume\"]: cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) if", "if x.endswith(\".jpg\")] files_train = [int(x.replace('.jpg','')) for x in os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")] image_id =", "name: (string) Name of image file for saving \"\"\" data = data_test if", "if len(contour) > 4: segmentationPoly.append(contour) return segmentationPoly def create_coco_json(): #COCO \"\"\" Create COCO", "axis=2) mask = 255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1) print(class_name) if self.too_small_obj: cv2.imshow('Too small object', mask)", "GeneratorDope: #DOPE def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": 
[]} def get_env(self): #DOPE", "[]} def get_env(self): #DOPE env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True,", "<< 24) + first_link_uid for x in range(env.robot.gripper_index, env.robot.num_joints + 1)]) # check", "generation \"\"\" env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on =", "config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) def collect_data(self, steps): \"\"\" Collect data for", "image_id = 0 return self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self): #DOPE data_train, data_test = self.init_data() data", "[x for x in np.unique(img_mask)] #identify merged objects in the camera view (in", "initialize dataset generator if config['dataset_type'] == 'coco': generator = GeneratorCoco() elif config['dataset_type'] ==", "70]) if done: print(\"Episode finished after {} timesteps\".format(t+1)) break # write JSON annotations", "in os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")] files_train = [int(x.replace('.jpg','')) for x in os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")]", "appear in the env, add colors config['used_class_names'] = dict([x[1:3] for x in config['used_class_names_quantity']])", "robot = config['robot'], render_on = True, gui_on = config['gui_on'], show_bounding_boxes_gui = config['show_bounding_boxes_gui'], changing_light_gui", "np.unique(img_mask)] #identify objects(links) in the camera view (in the image) img_mask = np.where(np.isin(img_mask,gripper_uids),", "when GUI is on print(\"Hard reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False) observation = env.get_observation()", "config['dataset_type'] == 'coco': generator = GeneratorCoco() elif config['dataset_type'] == 'dope': generator = GeneratorDope()", "0 return data_test, data_train, image_id def 
data_struct_image(self): #COCO \"\"\" Assign name to COCO", "#COCO \"\"\" Create COCO json data structure Returns: :return data_train: (dict) Data structure", "data_test, data_train, image_id = generator.resume() elif (config['make_dataset'] == \"display\"): data_train, data_test = generator.init_data()", "in range(6)] #action = [2,2,2,2,2,2] self.env.robot.reset_random(action) # send the Kuka arms up observation,", "= config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self): self.objdim = {} while", "bbox, category_id, id ], categories = _category_coco_format(), ) data_test = dict( images=[# file_name,", "def data_struct_image(self): #COCO \"\"\" Assign name to COCO dataset image and train of", "in str #2 or poly segmentation format bitmap = mask.decode(seg) seg = _segmentationToPoly(bitmap)", "if object_uid == robot_uids[0]: class_name = config['robot'] elif object_uid == gripper_uids[0]: class_name =", "data_train else: folder = config['output_test_folder'] data = data_test json_name = 'img_{}_cam{}.json'.format(image_id, camera_id) json_dict", "[img[\"id\"] for img in data_train['images']] image_id = max(img_ids) +1 # +1 if last", "p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0, 0.99), lineWidth = 4, lifeTime = 1) def write_json_end(self): #DOPE", "class_id, \"cuboid_dimensions\": self.objdim[class_name] }) self.data_dict = { \"class\": class_name, \"class_id\": class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\":", "of 'coco', 'dope', 'vae'!\") #prepare directories config['output_test_folder'] = config['output_folder'] + '/test' config['output_train_folder'] =", "ID of last image img_ids = [img[\"id\"] for img in data_train['images']] image_id =", "self.draw_bounding_box_3D() def draw_bounding_box_3D(self): #DOPE for points in range(7): #p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78, 0.47),", "= 
env.get_camera_opencv_matrix_values(camera_id) captured_image_size = {\"width\": config['camera_resolution'][0], \"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( name=\"camera\" + str(camera_id), intrinsic_settings=intrinsic_settings,", "here or pass as an input argument CONFIG_DEFAULT = pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') # helper", "= cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [0,255,0], -1) for projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]: image =", "objects(links) in the camera view (in the image) img_mask = np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask)", "data[\"images\"].clear() data[\"annotations\"].clear() class GeneratorDope: #DOPE def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []}", "the ids, just need to monothically increase self.id_unique = len(data_test['annotations']) + len(data_train['annotations']) print(\"Resuming", "open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f: data_train = json.load(f) # get the ID of last", "tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, [255,255,0], -1)", "or episode == config['num_episodes']-1: generator.write_json_end() data_train, data_test = generator.init_data() # end print('DATASET FINISHED')", "format seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str #2 or poly segmentation", "Make and append COCO annotations for each object in the scene \"\"\" seg", "im for projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, 
projected_cuboid_point)), 4, [0,255,0], -1)", "== robot_uids[0]: class_name = config['robot'] elif object_uid == gripper_uids[0]: class_name = env.robot.get_name() else:", "episode % config['num_episodes_hard_reset'] == 0: #to prevent objects vanishing when GUI is on", "# the main loop for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through episodes print(\"episode:", "https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. Parameters: :param mask: (array) Bitmap mask :return segmentationPoly: (list) Segmentation converted to", "mask im = observation[\"camera_data\"][camera_id][\"image\"] im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"]", "len(seg[0])>0) except: #make inverse map id->name (just to pretty print) inv_map = dict(zip(config['used_class_names'].values(),", "cv2.waitKey(1) print(class_name) if self.too_small_obj: cv2.imshow('Too small object', mask) else: cv2.imshow('Labeled object', mask) cv2.waitKey(1000)", "= observation[\"camera_data\"][camera_id][\"image\"] im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids =", "use for dataset for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id", "of class {} with area={} in img {}'.format(self.too_small_obj, self.data_dict['area'], name)) def visualize(self): #COCO", "config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return", "= 0 return self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self): #DOPE data_train, data_test = self.init_data() data =", "links obj_ids = [x for x in np.unique(img_mask)] #identify 
merged objects in the", "-1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int,", "\"\"\" def __init__(self): pass def get_env(self): #COCO \"\"\" Create environment for COCO dataset", "name def store_image_info(self): #DOPE #write dataset image info filename = str(image_id) + '.json'", "= [x for x in np.unique(img_mask)] #identify merged objects in the camera view", "matplotlib.pyplot import imshow, show import cv2 import numpy as np import os import", "Returns: :return env: (object) Environment for dataset generation \"\"\" env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot", "x2 y2 ...]]). Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. Parameters: :param mask: (array) Bitmap mask :return", "holes in the ids, just need to monothically increase self.id_unique = len(data_test['annotations']) +", "item in value: new_value.append(clr[item]) new_dict[key] = new_value config['color_dict'] = new_dict def _category_coco_format(): #COCO", "Returns: :return categories: (list) Categories in MSCOCO format \"\"\" categories = [] for", "in preceding dataset generation \"\"\" try: with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f: data_test =", "data_train = json.load(f) # get the ID of last image img_ids = [img[\"id\"]", "0: env_object = env_object_list[0] class_name = env_object.get_name() else: continue if class_name in config['used_class_names']:", "in img {}'.format(self.too_small_obj, self.data_dict['area'], name)) def visualize(self): #COCO \"\"\" Visualize mask and bounding", "[255,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, [255,255,0], -1) 
print(class_name) cv2.imshow('image',image) cv2.waitKey(1000)", "= cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4,", "the camera view) if object_uid == robot_uids[0]: class_name = config['robot'] elif object_uid ==", "'dope', 'vae'!\") #prepare directories config['output_test_folder'] = config['output_folder'] + '/test' config['output_train_folder'] = config['output_folder'] +", "episodes initiated during dataset generation \"\"\" data = np.zeros((steps, self.imsize, self.imsize, 3), dtype='f')", "observation['camera_data'][6]['image'] imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img = cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize)) cv2.imshow(\"image\", img) cv2.waitKey(1)", "env.robot.get_name() else: env_object_list = list(filter(lambda object: object.uid == object_uid, env_objects)) if len(env_object_list) >", "in the camera view (in the image) img_mask = np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask) #merge", "point in cuboid] projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB = [list(env.project_point_to_camera_image(point, camera_id)) for point", "if isTestSample == True else data_train name = '{}.jpg'.format(image_id) return data, name def", "structure :return image_id: (int) ID of last generated image in preceding dataset generation", "= used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], dataset = True, ), config_path", "{\"width\": config['camera_resolution'][0], \"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( name=\"camera\" + str(camera_id), intrinsic_settings=intrinsic_settings, 
captured_image_size=captured_image_size, )) if config['make_dataset']", "in self.objdim.keys(): self.objdim[obj.name] = obj.get_cuboid_dimensions() def init_data(self): #DOPE data_train = {\"objects\":[]} data_test =", "len(env_object_list) > 0: env_object = env_object_list[0] class_name = env_object.get_name() else: continue if class_name", "= env_object.get_name() else: continue if class_name in config['used_class_names']: class_id = config['used_class_names'][class_name] generator.get_append_annotations() #annotate", "raise Exception(\"dataset_type in config: use one of 'coco', 'dope', 'vae'!\") #prepare directories config['output_test_folder']", "import glob import json import commentjson import sys import random from pycocotools.cocostuffhelper import", "action = [random.uniform(1,2) for x in range(6)] #action = [2,2,2,2,2,2] self.env.robot.reset_random(action) # send", "= generator.resume() elif (config['make_dataset'] == \"display\"): data_train, data_test = generator.init_data() generator.episode_zero() # the", "json.dump(data, f, indent=4) def get_append_annotations(self): #DOPE cuboid_with_centroid = env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg =", "generation in COCO data structure :return data_test: (dist) Testing data from preceding dataset", "area = float(mask.area(seg)) bbox = mask.toBbox(seg).flatten().tolist() #1 run length encoding RLE segmentation format", "flag == 'train': folder = config['output_train_folder'] data = data_train else: folder = config['output_test_folder']", "import random from pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult from pycocotools import mask import pybullet", "self.episode_zero() except FileNotFoundError: image_id = 0 return self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self): #DOPE data_train, data_test", "bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc = 
create_3D_box(env_object,self.objdim[class_name]) box3D = [] for x in", "config['num_episodes'])) self.episode_zero() except FileNotFoundError: image_id = 0 return self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self): #DOPE data_train,", "width, id ], type='instances', annotations=[# segmentation, area, iscrowd, image_id, bbox, category_id, id ],", "4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0], -1) image =", "= True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self):", "dictionaries with category id-name pairs in MSCOCO format Returns: :return categories: (list) Categories", "-9.81) return env def episode_zero(self): \"\"\" Initial espisode set-up \"\"\" self.id_unique = 0", "env.render() #only render at the steps/frames we use for dataset for c in", "annotations.json at episode {} of {}.\".format(episode, config['num_episodes'])) for flag in ['test','train']: if flag", "), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self): \"\"\" Initial", "def _category_coco_format(): #COCO \"\"\" Create list of dictionaries with category id-name pairs in", "+ '.json' with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4) with open(os.path.join(config['output_train_folder'],", "len(str(t+7999)) name = padding * \"0\" + str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img) data[t] =", "print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) except FileNotFoundError: image_id = 0", "if config['color_dict'] == 'object_colors': color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\") 
config['color_randomizer']['exclude'].append(\"objects\") #write config.json to output_folder with open(config['output_folder']+'/config_dataset.json',", "if class_name not in self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({ \"class\": class_name, \"segmentation_class_id\": class_id, \"cuboid_dimensions\": self.objdim[class_name]", "config['camera_resolution'], dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env", "robots (using joint control) action = env.action_space.sample() observation, reward, done, info = env.step(action)", "obj.get_cuboid_dimensions() def init_data(self): #DOPE data_train = {\"objects\":[]} data_test = {\"objects\":[]} return data_train, data_test", "MSCOCO format Returns: :return categories: (list) Categories in MSCOCO format \"\"\" categories =", "def visualize(self): #DOPE image = im for projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]: image = cv2.circle(cv2.UMat(image),", "passed in argument. 
Loading default config: {}'.format(CONFIG_DEFAULT)) else: config_path = pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with", "env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\": bounding_box, \"cuboid\": cuboid, \"projected_cuboid\": projected_cuboid, \"box3D\": box3D,", "cv2.waitKey(1000) print(self.data_dict['bbox']) def write_json_end(self): #COCO \"\"\" Write json file with COCO annotations to", "moment def get_env(self): \"\"\" Create environment for VAE dataset generation according to dataset", "name = '{}.jpg'.format(image_id) return data, name def store_image_info(self): #DOPE #write dataset image info", "of {}.\".format(filename, name, episode, config['num_episodes'])) with open(os.path.join(path, filename), 'w') as f: json.dump(data, f,", "BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center class GeneratorCoco: #COCO \"\"\" Generator class for COCO image dataset", "if config['make_dataset'] in [\"new\", \"resume\"]: filename = \"_camera_settings\" + '.json' print(\"Storing {}.\".format(filename)) with", "= env_object_list[0] class_name = env_object.get_name() else: continue if class_name in config['used_class_names']: class_id =", "#prepare directories config['output_test_folder'] = config['output_folder'] + '/test' config['output_train_folder'] = config['output_folder'] + '/train' os.makedirs(config[\"output_test_folder\"],", "data_test json_name = 'img_{}_cam{}.json'.format(image_id, camera_id) json_dict = {\"images\": data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()} if", "converted to polynoms \"\"\" contours, _ = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly = []", "mask :return segmentationPoly: (list) Segmentation converted to polynoms 
\"\"\" contours, _ = cv2.findContours((mask).astype(np.uint8),", "init positions of objects # if episode == 0: # generator.episode_zero() if episode", "Categories in MSCOCO format \"\"\" categories = [] for value, key in config['used_class_names'].items():", "lineColorRGB=(0.31, 0.78, 0.47), lineWidth = 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0, 0.99), lineWidth =", "= max(img_ids) +1 # +1 if last sample were test (thus not in", "mask) cv2.waitKey(1000) print(self.data_dict['bbox']) def write_json_end(self): #COCO \"\"\" Write json file with COCO annotations", "[] for value, key in config['used_class_names'].items(): categories.append({\"id\": int(key), \"name\": str(value)}) return categories def", "and bounding box coordinates for COCO annotated object \"\"\" mask = img_mask==object_uid mask", "episodes: {}\".format(image_id, config['num_episodes'])) except FileNotFoundError: image_id = 0 return data_test, data_train, image_id def", "from the simulator in COCO or DOPE format. Used for vision training. 
import", "env_objects = observation[\"objects\"] for obj in env_objects: if obj.name not in self.objdim.keys(): self.objdim[obj.name]", "at the moment def get_env(self): \"\"\" Create environment for VAE dataset generation according", "# send the Kuka arms up observation, reward, done, info = self.env.step(action) img", "= cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4,", "{}.\".format(filename, name, episode, config['num_episodes'])) with open(os.path.join(path, filename), 'w') as f: json.dump(data, f, indent=4)", "cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, [255,255,0], -1) print(class_name) cv2.imshow('image',image) cv2.waitKey(1000) self.draw_bounding_box_3D() def draw_bounding_box_3D(self): #DOPE", "imshow, show import cv2 import numpy as np import os import glob import", "\"cuboid\": cuboid, \"projected_cuboid\": projected_cuboid, \"box3D\": box3D, \"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict) def", "get_env(self): #DOPE env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on =", "self.data_dict = dict( id=self.id_unique, image_id=image_id, category_id=class_id, segmentation=seg, area=area, bbox=bbox, iscrowd=0, ) if self.too_small_obj", "through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id) captured_image_size = {\"width\": config['camera_resolution'][0], \"height\": config['camera_resolution'][1]}", ":return 
data_train: (dict) Training data from preceding dataset generation in COCO data structure", "up observation, reward, done, info = self.env.step(action) img = observation['camera_data'][6]['image'] imgs = cv2.cvtColor(img,", "observation[\"objects\"] for t in range(config['num_steps']): #loop through steps # randomize the movements of", "json.load(read_file) #json file with suggested colors new_dict = {} for key, value in", "(in the camera view) if object_uid == robot_uids[0]: class_name = config['robot'] elif object_uid", "if len(env_object_list) > 0: env_object = env_object_list[0] class_name = env_object.get_name() else: continue if", "print(\"Restoring dataset generation\") data_test, data_train, image_id = generator.resume() elif (config['make_dataset'] == \"display\"): data_train,", "file with suggested colors new_dict = {} for key, value in config['object_colors'].items(): new_value", "= config['output_test_folder'] if isTestSample == True else config['output_train_folder'] #get dataset image and its", "config: {}'.format(CONFIG_DEFAULT)) else: config_path = pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with open(config_path) as file: config =", "filename = str(image_id) + '.json' if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing {} and", "cv2.imshow('Too small object', mask) else: cv2.imshow('Labeled object', mask) cv2.waitKey(1000) print(self.data_dict['bbox']) def write_json_end(self): #COCO", "to dataset config file \"\"\" self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on =", "merged objects in the camera view (in the image) #prepare data strucuture data,", "env def episode_zero(self): self.objdim = {} while len(self.objdim.keys()) < len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], random_pos=False) observation", "data_test if isTestSample == True else data_train name = '{}.jpg'.format(image_id) return data, name", "bounding_box, 
\"cuboid\": cuboid, \"projected_cuboid\": projected_cuboid, \"box3D\": box3D, \"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict)", "{}/{}\".format(t, steps)) self.env.close() # main if __name__ == \"__main__\": if len(sys.argv) <= 1:", "for VAE dataset Parameters: :param steps: (int) Number of episodes initiated during dataset", "\"\"\" with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\") as read_file: clr = json.load(read_file) #json file with", "file with COCO annotations to output directory \"\"\" if config['make_dataset'] in [\"new\", \"resume\"]:", "with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\") as read_file: clr = json.load(read_file) #json file with suggested", "#utf-8 format in str bbox = mask.toBbox(seg).flatten().tolist() bounding_box = {'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2],", "class_name = env.robot.get_name() else: env_object_list = list(filter(lambda object: object.uid == object_uid, env_objects)) if", "for x in config['used_class_names_quantity']]) used_objects = [] for x in config['used_class_names_quantity']: for _", "small object', mask) else: cv2.imshow('Labeled object', mask) cv2.waitKey(1000) print(self.data_dict['bbox']) def write_json_end(self): #COCO \"\"\"", "in config['used_class_names'].items(): categories.append({\"id\": int(key), \"name\": str(value)}) return categories def _segmentationToPoly(mask, ): \"\"\" Convert", "f, indent=4) # clear data and continue data[\"images\"].clear() data[\"annotations\"].clear() class GeneratorDope: #DOPE def", "bbox[1]+bbox[3]]} boxp,boxc = create_3D_box(env_object,self.objdim[class_name]) box3D = [] for x in range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc", "generator = GeneratorVae() else: raise Exception(\"dataset_type in config: use one of 'coco', 'dope',", "the camera view (in the image) img_mask = 
np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask) #merge gripper", "# get the ID of last image img_ids = [img[\"id\"] for img in", "os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")] image_id = max(max(files_test),max(files_train)) print(\"Resuming from image_id {} for episodes: {}\".format(image_id,", "[0,255,0], -1) for projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [255,0,0],", "to output_folder with open(config['output_folder']+'/config_dataset.json', 'w') as f: commentjson.dump(config, f) if config['dataset_type'] == 'vae':", "try: with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f: data_test = json.load(f) except: pass #happens when", "= config['output_folder'] + '/train' generator.collect_data(config['num_episodes']) print(\"Dataset finished. Ready to train!\") raise SystemExit(0) #", "GUI is on print(\"Hard reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False) observation = env.get_observation() env_objects", "at episode {} of {}.\".format(episode, config['num_episodes'])) for flag in ['test','train']: if flag ==", "# generator.episode_zero() if episode % config['num_episodes_hard_reset'] == 0: #to prevent objects vanishing when", "Visualize mask and bounding box coordinates for COCO annotated object \"\"\" mask =", "config['make_dataset'] == 'resume': #resume print(\"Restoring dataset generation\") data_test, data_train, image_id = generator.resume() elif", "structures for COCO dataset annotations Returns: :return data_train: (dict) Data structure for training", "cv2.waitKey(1) padding = 6 - len(str(t+7999)) name = padding * \"0\" + str(t+7999)", "file: config = commentjson.load(file) # initialize dataset generator if config['dataset_type'] == 'coco': generator", "to polynoms \"\"\" contours, _ = 
cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly = [] for", "generation\") data_test, data_train, image_id = generator.resume() elif (config['make_dataset'] == \"display\"): data_train, data_test =", "isTestSample == True else config['output_train_folder'] #get dataset image and its mask im =", "COCO annotated object \"\"\" mask = img_mask==object_uid mask = np.expand_dims(mask, axis=2) mask =", "[int(x.replace('.jpg','')) for x in os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")] image_id = max(max(files_test),max(files_train)) print(\"Resuming from image_id", "= env.get_observation() env_objects = observation[\"objects\"] for t in range(config['num_steps']): #loop through steps #", "for each object in the scene \"\"\" seg = segmentationToCocoMask(img_mask,object_uid) area = float(mask.area(seg))", "robot links obj_ids = [x for x in np.unique(img_mask)] #identify merged objects in", "last sample were test (thus not in here). 
it's safe to have holes", "#COCO \"\"\" Make and append COCO annotations for each object in the scene", "f, indent=4) class GeneratorVae: \"\"\" Generator class for image dataset for VAE vision", "#merge robot links obj_ids = [x for x in np.unique(img_mask)] #identify merged objects", "= max(max(files_test),max(files_train)) print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) self.episode_zero() except FileNotFoundError:", "in MSCOCO format Returns: :return categories: (list) Categories in MSCOCO format \"\"\" categories", "annotations Returns: :return data_train: (dict) Data structure for training data :return data_test: (dist)", "create_coco_json(): #COCO \"\"\" Create COCO json data structure Returns: :return data_train: (dict) Data", "return env def episode_zero(self): \"\"\" Initial espisode set-up \"\"\" self.id_unique = 0 #image_id*x", "if t == config['num_steps']-1 or t%config['make_shot_every_frame'] == 0: # we only use frames", "bbox, category_id, id ], categories = _category_coco_format(), ) return data_train, data_test def create_3D_box(env_object,objdim):", "send the Kuka arms up observation, reward, done, info = self.env.step(action) img =", "object_uid) seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str bbox = mask.toBbox(seg).flatten().tolist() bounding_box", "): \"\"\" Convert segmentation from RLE to polynoms ([[x1 y1 x2 x2 y2", "class for COCO image dataset for YOLACT vision model training \"\"\" def __init__(self):", "#COCO \"\"\" Assign name to COCO dataset image and train of test status", "#DOPE objpos = env_object.get_position() objorient = env_object.get_orientation() #objdim = env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return", "class_id = config['used_class_names'][class_name] generator.get_append_annotations() #annotate and append annotations if config['visualize']: 
#visualize generator.visualize() #store", "(just to pretty print) inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj = inv_map[class_id] self.data_dict =", "dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def", "files = glob.glob(os.path.join(config['output_test_folder'],'./*')) for f in files: os.remove(f) files = glob.glob(os.path.join(config['output_train_folder'],'./*')) for f", "from pycocotools import mask import pybullet as p from bbox import BBox3D from", "return box.p,box.center class GeneratorCoco: #COCO \"\"\" Generator class for COCO image dataset for", "if __name__ == \"__main__\": if len(sys.argv) <= 1: config_path = CONFIG_DEFAULT print('No config.json", "object \"\"\" mask = img_mask==object_uid mask = np.expand_dims(mask, axis=2) mask = 255*mask.astype('uint8') cv2.imshow('image',im)", "= env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center class GeneratorCoco: #COCO \"\"\" Generator class for", ":return data_test: (dist) Data structure for testing data \"\"\" data_train, data_test = create_coco_json()", "image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, [255,255,0], -1) print(class_name) cv2.imshow('image',image) cv2.waitKey(1000) self.draw_bounding_box_3D() def", "Number of episodes initiated during dataset generation \"\"\" data = np.zeros((steps, self.imsize, self.imsize,", "data \"\"\" data_train, data_test = create_coco_json() return data_train, data_test def resume(self): #COCO \"\"\"", "indent=4) # clear data and continue data[\"images\"].clear() data[\"annotations\"].clear() class GeneratorDope: #DOPE def __init__(self):", "object_sampling_area = 
config['object_sampling_area'], num_objects_range = config['num_objects_range'], used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution", "exist_ok=True) os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define objects to appear in the env, add colors config['used_class_names']", "collect_data(self, steps): \"\"\" Collect data for VAE dataset Parameters: :param steps: (int) Number", "through kuka and used objects in the image (in the camera view) if", "[list(env.project_point_to_camera_image(point, camera_id)) for point in cuboid] projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB = [list(env.project_point_to_camera_image(point,", "\"cuboid_dimensions\": self.objdim[class_name] }) self.data_dict = { \"class\": class_name, \"class_id\": class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(),", "by name as specified in the training config file \"\"\" with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'),", "for x in range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc = list(boxc) projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid", "#DOPE data_train = {\"objects\":[]} data_test = {\"objects\":[]} return data_train, data_test def resume(self): #DOPE", "= config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self): \"\"\" Initial espisode set-up", "name = generator.data_struct_image() for object_uid in obj_ids: #loop through kuka and used objects", "every n periods or at the end if episode % config['autosafe_episode'] == 0", "range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids = np.array([((x + 1) << 24) + first_link_uid for x", "in self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) 
self.object_settings['exported_objects'].append({ \"class\": class_name, \"segmentation_class_id\": class_id, \"cuboid_dimensions\": self.objdim[class_name] }) self.data_dict =", "reward, done, info = self.env.step(action) img = observation['camera_data'][6]['image'] imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img", "data_train, data_test def create_3D_box(env_object,objdim): #DOPE objpos = env_object.get_position() objorient = env_object.get_orientation() #objdim =", "format in str bbox = mask.toBbox(seg).flatten().tolist() bounding_box = {'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]}", "camera view) if object_uid == robot_uids[0]: class_name = config['robot'] elif object_uid == gripper_uids[0]:", "id=image_id,)) def get_append_annotations(self): #COCO \"\"\" Make and append COCO annotations for each object", "== 'train': folder = config['output_train_folder'] data = data_train else: folder = config['output_test_folder'] data", "cv2.imshow(\"image\", img) cv2.waitKey(1) padding = 6 - len(str(t+7999)) name = padding * \"0\"", "vanishing when GUI is on print(\"Hard reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False) observation =", "#get dataset image and its mask im = observation[\"camera_data\"][camera_id][\"image\"] im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)", "Segmentation converted to polynoms \"\"\" contours, _ = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly =", "indent=4) class GeneratorVae: \"\"\" Generator class for image dataset for VAE vision model", "argument CONFIG_DEFAULT = pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') # helper functions: def color_names_to_rgb(): \"\"\" Assign RGB", "append COCO annotations for each object in the scene \"\"\" seg = segmentationToCocoMask(img_mask,object_uid)", "objects in the image (in the camera view) if object_uid == robot_uids[0]: class_name", 
"\"_camera_settings\" + '.json' print(\"Storing {}.\".format(filename)) with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.camera, f,", "data_train name = 'img_{}_cam{}.jpg'.format(image_id,camera_id) return data, name def store_image_info(self): #COCO \"\"\" Append COCO", "Parameters: :param mask: (array) Bitmap mask :return segmentationPoly: (list) Segmentation converted to polynoms", "else: raise Exception(\"dataset_type in config: use one of 'coco', 'dope', 'vae'!\") #prepare directories", "just need to monothically increase self.id_unique = len(data_test['annotations']) + len(data_train['annotations']) print(\"Resuming from image_id", "#merge gripper links img_mask = np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge robot links obj_ids =", "clr = json.load(read_file) #json file with suggested colors new_dict = {} for key,", "x2 x2 y2 ...]]). Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. 
Parameters: :param mask: (array) Bitmap mask", "\"\"\" Convert segmentation from RLE to polynoms ([[x1 y1 x2 x2 y2 ...]]).", "4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0], -1) image =", "environment for COCO dataset generation according to dataset config file Returns: :return env:", "{\"objects\":[]} data_test = {\"objects\":[]} return data_train, data_test def resume(self): #DOPE try: files_test =", "projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id)) for point in cuboid] projected_3DBB_centroid", "cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg = segmentationToCocoMask(img_mask, object_uid) seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in", "GeneratorDope() elif config['dataset_type'] == 'vae': generator = GeneratorVae() else: raise Exception(\"dataset_type in config:", "annotations every n periods or at the end if episode % config['autosafe_episode'] ==", "camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id) captured_image_size = {\"width\": config['camera_resolution'][0], \"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( name=\"camera\" +", "return categories def _segmentationToPoly(mask, ): \"\"\" Convert segmentation from RLE to polynoms ([[x1", "config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self): \"\"\" Initial espisode set-up \"\"\"", "which can happen for small numbers try: with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f: data_train", "gui_on = config['gui_on'], show_bounding_boxes_gui = config['show_bounding_boxes_gui'], changing_light_gui = 
config['changing_light_gui'], shadows_on = config['shadows_on'], color_dict", "category_id, id ], categories = _category_coco_format(), ) return data_train, data_test def create_3D_box(env_object,objdim): #DOPE", "polynoms ([[x1 y1 x2 x2 y2 ...]]). Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. Parameters: :param mask:", "\"\"\" try: with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f: data_test = json.load(f) except: pass #happens", "= config[\"imsize\"] # only supported format at the moment def get_env(self): \"\"\" Create", "if len(sys.argv) <= 1: config_path = CONFIG_DEFAULT print('No config.json passed in argument. Loading", "name as specified in the training config file \"\"\" with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\")", "timesteps\".format(t+1)) break # write JSON annotations every n periods or at the end", "c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id) captured_image_size =", "config['num_episodes'])) with open(os.path.join(path, filename), 'w') as f: json.dump(data, f, indent=4) def get_append_annotations(self): #DOPE", "dataset Parameters: :param steps: (int) Number of episodes initiated during dataset generation \"\"\"", "Corresponding data dictionary :param name: (string) Name of image file for saving \"\"\"", "<filename>myGym/generate_dataset.py<gh_stars>10-100 ## script to generate train/test sets from the simulator in COCO or", "used objects in the image (in the camera view) if object_uid == robot_uids[0]:", "#append annotations self.id_unique +=1 else: #area too small to be realistically seen print('Too", "json.dump(self.object_settings, f, indent=4) class GeneratorVae: \"\"\" Generator class for image dataset for VAE", "empty, which can happen for small numbers try: with 
open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f:", "config['dataset_type'] == 'vae': generator.get_env() dataset_pth = config['output_folder'] + '/train' generator.collect_data(config['num_episodes']) print(\"Dataset finished. Ready", "config['used_class_names_quantity']]) used_objects = [] for x in config['used_class_names_quantity']: for _ in range(x[0]): used_objects.append(x[1])", "#for paralel dataset generation >0, otherwise 0 if config['make_dataset'] == \"new\": #cleanup files", "data_train, image_id def data_struct_image(self): #COCO \"\"\" Assign name to COCO dataset image and", "print('No config.json passed in argument. Loading default config: {}'.format(CONFIG_DEFAULT)) else: config_path = pkg_resources.resource_filename(\"myGym\",", "= img print(\"Image {}/{}\".format(t, steps)) self.env.close() # main if __name__ == \"__main__\": if", "GeneratorVae() else: raise Exception(\"dataset_type in config: use one of 'coco', 'dope', 'vae'!\") #prepare", "filename = \"_camera_settings\" + '.json' print(\"Storing {}.\".format(filename)) with open(os.path.join(config['output_test_folder'], filename), 'w') as f:", "new_dict def _category_coco_format(): #COCO \"\"\" Create list of dictionaries with category id-name pairs", "categories = _category_coco_format(), ) return data_train, data_test def create_3D_box(env_object,objdim): #DOPE objpos = env_object.get_position()", "in range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids = np.array([((x + 1) << 24) + first_link_uid for", "RLE segmentation format seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str #2 or", "links img_mask = np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge robot links obj_ids = [x for", "filename), 'w') as f: json.dump(self.camera, f, indent=4) filename = \"_object_settings\" + '.json' with", "self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on = 
config['gui_on'], show_bounding_boxes_gui", "dataset annotations Returns: :return data_train: (dict) Data structure for training data :return data_test:", "dtype='f') for t in range(steps): self.env.reset(random_pos=True) self.env.render() action = [random.uniform(1,2) for x in", "dataset generation, otherwise 0 def init_data(self): #COCO \"\"\" Initialize data structures for COCO", "in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id) captured_image_size = {\"width\":", "data and continue data[\"images\"].clear() data[\"annotations\"].clear() class GeneratorDope: #DOPE def __init__(self): self.object_settings = {\"exported_object_classes\":", "Collect data for VAE dataset Parameters: :param steps: (int) Number of episodes initiated", "None self.imsize = config[\"imsize\"] # only supported format at the moment def get_env(self):", "env.robot.gripper_index)],dtype=np.int32) gripper_uids = np.array([((x + 1) << 24) + first_link_uid for x in", "Create environment for COCO dataset generation according to dataset config file Returns: :return", "-1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int,", "= True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) def collect_data(self, steps): \"\"\"", "generated image in preceding dataset generation \"\"\" try: with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f:", "area, iscrowd, image_id, bbox, category_id, id ], categories = _category_coco_format(), ) data_test =", "= cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img = cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize)) cv2.imshow(\"image\", img) cv2.waitKey(1) padding 
=", "= glob.glob(os.path.join(config['output_test_folder'],'./*')) for f in files: os.remove(f) files = glob.glob(os.path.join(config['output_train_folder'],'./*')) for f in", "6 - len(str(t+7999)) name = padding * \"0\" + str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img)", "'resume': #resume print(\"Restoring dataset generation\") data_test, data_train, image_id = generator.resume() elif (config['make_dataset'] ==", "Parameters: :param steps: (int) Number of episodes initiated during dataset generation \"\"\" data", "+ '/test' config['output_train_folder'] = config['output_folder'] + '/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True) os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define objects", "\"utf-8\") #utf-8 format in str #2 or poly segmentation format bitmap = mask.decode(seg)", "as f: json.dump(self.object_settings, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.object_settings, f,", "cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id = image_id + 1 #unique isTestSample = np.random.random_sample() < config['train_test_split_pct']", "json.dump(json_dict, f, indent=4) # clear data and continue data[\"images\"].clear() data[\"annotations\"].clear() class GeneratorDope: #DOPE", ":param mask: (array) Bitmap mask :return segmentationPoly: (list) Segmentation converted to polynoms \"\"\"", "], type='instances', annotations=[# segmentation, area, iscrowd, image_id, bbox, category_id, id ], categories =", "= {'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc = create_3D_box(env_object,self.objdim[class_name]) box3D = [] for", "+ first_link_uid for x in range(env.robot.gripper_index, env.robot.num_joints + 1)]) # check mode of", "vision training. 
import gym from myGym import envs from matplotlib.pyplot import imshow, show", "cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0],", "movements of robots (using joint control) action = env.action_space.sample() observation, reward, done, info", "0 #for paralel dataset generation >0, otherwise 0 if config['make_dataset'] == \"new\": #cleanup", "height, width, id ], type='instances', annotations=[# segmentation, area, iscrowd, image_id, bbox, category_id, id", "the simulator in COCO or DOPE format. Used for vision training. import gym", "cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize)) cv2.imshow(\"image\", img) cv2.waitKey(1) padding = 6 - len(str(t+7999)) name =", "and train of test status Returns: :param data: (dict) Corresponding data dictionary :param", "config['color_dict'], object_sampling_area = config['object_sampling_area'], num_objects_range = config['num_objects_range'], used_objects = used_objects, active_cameras = config['active_cameras'],", "shadows_on = config['shadows_on'], color_dict = config['color_dict'], object_sampling_area = config['object_sampling_area'], num_objects_range = config['num_objects_range'], used_objects", "\"\"\" Assign name to COCO dataset image and train of test status Returns:", "the scene \"\"\" seg = segmentationToCocoMask(img_mask,object_uid) area = float(mask.area(seg)) bbox = mask.toBbox(seg).flatten().tolist() #1", "{}'.format(self.too_small_obj, self.data_dict['area'], name)) def visualize(self): #COCO \"\"\" Visualize mask and bounding box coordinates", "self.object_settings['exported_objects'].append({ \"class\": class_name, \"segmentation_class_id\": class_id, \"cuboid_dimensions\": self.objdim[class_name] }) self.data_dict = { \"class\": class_name,", "data, name def store_image_info(self): 
#DOPE #write dataset image info filename = str(image_id) +", "projected_cuboid_centroid, \"bounding_box\": bounding_box, \"cuboid\": cuboid, \"projected_cuboid\": projected_cuboid, \"box3D\": box3D, \"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid,", "training. import gym from myGym import envs from matplotlib.pyplot import imshow, show import", "# +1 if last sample were test (thus not in here). it's safe", "# main if __name__ == \"__main__\": if len(sys.argv) <= 1: config_path = CONFIG_DEFAULT", "from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) self.episode_zero() except FileNotFoundError: image_id = 0", "'configs/rgbcolors.json'), \"r\") as read_file: clr = json.load(read_file) #json file with suggested colors new_dict", "have holes in the ids, just need to monothically increase self.id_unique = len(data_test['annotations'])", "cv2.COLOR_RGB2BGR) #fix colors img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids = [x for x in np.unique(img_mask)]", "= 255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1) print(class_name) if self.too_small_obj: cv2.imshow('Too small object', mask) else: cv2.imshow('Labeled", "FileNotFoundError: image_id = 0 return data_test, data_train, image_id def data_struct_image(self): #COCO \"\"\" Assign", "Generator class for image dataset for VAE vision model training \"\"\" def __init__(self):", "if obj.name not in self.objdim.keys(): self.objdim[obj.name] = obj.get_cuboid_dimensions() def init_data(self): #DOPE data_train =", "projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict) def visualize(self): #DOPE image = im for projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]:", "except FileNotFoundError: image_id = 0 return self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self): #DOPE data_train, data_test =", "helper functions: def color_names_to_rgb(): \"\"\" Assign RGB colors to 
objects by name as", "config['output_test_folder'] if isTestSample == True else config['output_train_folder'] #get dataset image and its mask", "0 or episode == config['num_episodes']-1: generator.write_json_end() data_train, data_test = generator.init_data() # end print('DATASET", "image = im for projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4,", "= img_mask==object_uid mask = np.expand_dims(mask, axis=2) mask = 255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1) print(class_name) if", "lifeTime = 1) def write_json_end(self): #DOPE self.camera = {\"camera_settings\": []} for c in", "dataset image and train of test status Returns: :param data: (dict) Corresponding data", "= env.robot.get_name() else: env_object_list = list(filter(lambda object: object.uid == object_uid, env_objects)) if len(env_object_list)", "in config['used_class_names_quantity']]) used_objects = [] for x in config['used_class_names_quantity']: for _ in range(x[0]):", "False: #area ok data['annotations'].append(self.data_dict) #append annotations self.id_unique +=1 else: #area too small to", "in range(7): #p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78, 0.47), lineWidth = 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0,", "during dataset generation \"\"\" data = np.zeros((steps, self.imsize, self.imsize, 3), dtype='f') for t", "data: (dict) Corresponding data dictionary :param name: (string) Name of image file for", "= {\"camera_settings\": []} for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings", "used_objects = [] for x in config['used_class_names_quantity']: for _ in range(x[0]): used_objects.append(x[1]) if", 
"json.dump(self.camera, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4) filename", "return self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self): #DOPE data_train, data_test = self.init_data() data = data_test if", "- len(str(t+7999)) name = padding * \"0\" + str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img) data[t]", "def write_json_end(self): #DOPE self.camera = {\"camera_settings\": []} for c in range(np.count_nonzero(config['active_cameras'])): #loop through", "= str(image_id) + '.json' if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing {} and {}", "self.imsize = config[\"imsize\"] # only supported format at the moment def get_env(self): \"\"\"", "\"segmentation_class_id\": class_id, \"cuboid_dimensions\": self.objdim[class_name] }) self.data_dict = { \"class\": class_name, \"class_id\": class_id, \"location\":env_object.get_position(),", "in range(env.robot.gripper_index, env.robot.num_joints + 1)]) # check mode of writing files image_id =", "data, name = generator.data_struct_image() for object_uid in obj_ids: #loop through kuka and used", "obj_ids: #loop through kuka and used objects in the image (in the camera", "#DOPE def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} def get_env(self): #DOPE env", "testing data \"\"\" data_train, data_test = create_coco_json() return data_train, data_test def resume(self): #COCO", "def collect_data(self, steps): \"\"\" Collect data for VAE dataset Parameters: :param steps: (int)", "in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id = image_id + 1 #unique", "data = data_test if isTestSample == True else data_train name = 'img_{}_cam{}.jpg'.format(image_id,camera_id) return", "#write config.json to output_folder with 
open(config['output_folder']+'/config_dataset.json', 'w') as f: commentjson.dump(config, f) if config['dataset_type']", "image_id, bbox, category_id, id ], categories = _category_coco_format(), ) data_test = dict( images=[#", "generation, otherwise 0 def init_data(self): #COCO \"\"\" Initialize data structures for COCO dataset", "'coco', 'dope', 'vae'!\") #prepare directories config['output_test_folder'] = config['output_folder'] + '/test' config['output_train_folder'] = config['output_folder']", "= float(mask.area(seg)) bbox = mask.toBbox(seg).flatten().tolist() #1 run length encoding RLE segmentation format seg['counts']", "image_id: (int) ID of last generated image in preceding dataset generation \"\"\" try:", "= 4, lifeTime = 1) def write_json_end(self): #DOPE self.camera = {\"camera_settings\": []} for", "point in box3D] if class_name not in self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({ \"class\": class_name, \"segmentation_class_id\":", "self.camera[\"camera_settings\"].append(dict( name=\"camera\" + str(camera_id), intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size, )) if config['make_dataset'] in [\"new\", \"resume\"]: filename", "preceding dataset generation \"\"\" try: with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f: data_test = json.load(f)", "init_data(self): #COCO \"\"\" Initialize data structures for COCO dataset annotations Returns: :return data_train:", "env: (object) Environment for dataset generation \"\"\" env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'],", "0 def init_data(self): #COCO \"\"\" Initialize data structures for COCO dataset annotations Returns:", "filename = \"_object_settings\" + '.json' with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.object_settings, f,", "positions of 
objects # if episode == 0: # generator.episode_zero() if episode %", "= segmentationToCocoMask(img_mask,object_uid) area = float(mask.area(seg)) bbox = mask.toBbox(seg).flatten().tolist() #1 run length encoding RLE", "dataset generation \"\"\" env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on", "while len(self.objdim.keys()) < len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], random_pos=False) observation = env.get_observation() env_objects = observation[\"objects\"] for", "'train': folder = config['output_train_folder'] data = data_train else: folder = config['output_test_folder'] data =", "(thus not in here). it's safe to have holes in the ids, just", "== 'vae': generator.get_env() dataset_pth = config['output_folder'] + '/train' generator.collect_data(config['num_episodes']) print(\"Dataset finished. Ready to", "if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing annotations.json at episode {} of {}.\".format(episode, config['num_episodes']))", "json import commentjson import sys import random from pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult from", "= env_object.get_position() objorient = env_object.get_orientation() #objdim = env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center class", "elif config['make_dataset'] == 'resume': #resume print(\"Restoring dataset generation\") data_test, data_train, image_id = generator.resume()", "open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4) class GeneratorVae: \"\"\" Generator class", "box3D = [] for x in range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc = list(boxc) projected_cuboid_centroid =", "data_train: (dict) Data structure for training data :return data_test: 
(dist) Data structure for", "for testing data \"\"\" data_train, data_test = create_coco_json() return data_train, data_test def resume(self):", "through steps # randomize the movements of robots (using joint control) action =", "cv2.CHAIN_APPROX_SIMPLE) segmentationPoly = [] for contour in contours: contour = contour.flatten().tolist() if len(contour)", "json_dict = {\"images\": data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()} if len(data[\"images\"]) > 0: with open(os.path.join(folder,json_name),", "data[\"annotations\"], \"categories\":_category_coco_format()} if len(data[\"images\"]) > 0: with open(os.path.join(folder,json_name), 'w') as f: json.dump(json_dict, f,", "{\"exported_object_classes\": [], \"exported_objects\": []} def get_env(self): #DOPE env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'],", "of writing files image_id = 0 #for paralel dataset generation >0, otherwise 0", "observation, reward, done, info = env.step(action) if t == config['num_steps']-1 or t%config['make_shot_every_frame'] ==", "print(\"Episode finished after {} timesteps\".format(t+1)) break # write JSON annotations every n periods", "RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on = config['gui_on'], show_bounding_boxes_gui = config['show_bounding_boxes_gui'],", "output directory \"\"\" if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing annotations.json at episode {}", "= [int(x.replace('.jpg','')) for x in os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")] image_id = max(max(files_test),max(files_train)) print(\"Resuming from", "Convert segmentation from RLE to polynoms ([[x1 y1 x2 x2 y2 ...]]). 
Code", "Create environment for VAE dataset generation according to dataset config file \"\"\" self.env", "dict([x[1:3] for x in config['used_class_names_quantity']]) used_objects = [] for x in config['used_class_names_quantity']: for", "to have holes in the ids, just need to monothically increase self.id_unique =", "x.endswith(\".jpg\")] files_train = [int(x.replace('.jpg','')) for x in os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")] image_id = max(max(files_test),max(files_train))", "1)]) # check mode of writing files image_id = 0 #for paralel dataset", "= im for projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [0,255,0],", "print(\"Dataset finished. Ready to train!\") raise SystemExit(0) # initialize pybullet env env =", "as read_file: clr = json.load(read_file) #json file with suggested colors new_dict = {}", "= generator.get_env() first_link_uid = env.robot.robot_uid robot_uids = np.array([((x + 1) << 24) +", "with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4) filename = \"_object_settings\" +", "training data :return data_test: (dist) Data structure for testing data \"\"\" data_train, data_test", "segmentationToCocoMask(img_mask, object_uid) seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str bbox = mask.toBbox(seg).flatten().tolist()", "pybullet env env = generator.get_env() first_link_uid = env.robot.robot_uid robot_uids = np.array([((x + 1)", "class_name = config['robot'] elif object_uid == gripper_uids[0]: class_name = env.robot.get_name() else: env_object_list =", "0 #image_id*x for paralel dataset generation, otherwise 0 def init_data(self): #COCO \"\"\" Initialize", "= None self.imsize = config[\"imsize\"] # only supported format at the moment def", "[0,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, 
[data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image),", "image_id = max(img_ids) +1 # +1 if last sample were test (thus not", "value, key in config['used_class_names'].items(): categories.append({\"id\": int(key), \"name\": str(value)}) return categories def _segmentationToPoly(mask, ):", "= config['output_test_folder'] data = data_test json_name = 'img_{}_cam{}.json'.format(image_id, camera_id) json_dict = {\"images\": data[\"images\"],", "= _segmentationToPoly(bitmap) self.too_small_obj = False try: #notify and skip the object with too", "for episodes: {}\".format(image_id, config['num_episodes'])) except FileNotFoundError: image_id = 0 return data_test, data_train, image_id", "def resume(self): #DOPE try: files_test = [int(x.replace('.jpg','')) for x in os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")]", "the env, add colors config['used_class_names'] = dict([x[1:3] for x in config['used_class_names_quantity']]) used_objects =", "and used objects in the image (in the camera view) if object_uid ==", "= obj.get_cuboid_dimensions() def init_data(self): #DOPE data_train = {\"objects\":[]} data_test = {\"objects\":[]} return data_train,", "if last sample were test (thus not in here). 
it's safe to have", "_category_coco_format(), ) return data_train, data_test def create_3D_box(env_object,objdim): #DOPE objpos = env_object.get_position() objorient =", "box coordinates for COCO annotated object \"\"\" mask = img_mask==object_uid mask = np.expand_dims(mask,", "mask = 255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1) print(class_name) if self.too_small_obj: cv2.imshow('Too small object', mask) else:", "= glob.glob(os.path.join(config['output_train_folder'],'./*')) for f in files: os.remove(f) data_train, data_test = generator.init_data() generator.episode_zero() elif", "data = data_test json_name = 'img_{}_cam{}.json'.format(image_id, camera_id) json_dict = {\"images\": data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"],", "#COCO \"\"\" Initialize data structures for COCO dataset annotations Returns: :return data_train: (dict)", "VAE dataset Parameters: :param steps: (int) Number of episodes initiated during dataset generation", "for small numbers try: with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f: data_train = json.load(f) #", "cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img = cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize)) cv2.imshow(\"image\", img) cv2.waitKey(1) padding = 6", "def init_data(self): #DOPE data_train = {\"objects\":[]} data_test = {\"objects\":[]} return data_train, data_test def", "data_train: (dict) Training data from preceding dataset generation in COCO data structure :return", "json.dump(self.camera, f, indent=4) filename = \"_object_settings\" + '.json' with open(os.path.join(config['output_test_folder'], filename), 'w') as", "= config['active_cameras'], camera_resolution = config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset = True, ), config_path = config['output_folder']+'/config_dataset.json')", "status Returns: :param data: (dict) Corresponding data dictionary :param name: (string) Name of", "import envs from 
matplotlib.pyplot import imshow, show import cv2 import numpy as np", "elif (config['make_dataset'] == \"display\"): data_train, data_test = generator.init_data() generator.episode_zero() # the main loop", "info = env.step(action) if t == config['num_steps']-1 or t%config['make_shot_every_frame'] == 0: # we", "gym from myGym import envs from matplotlib.pyplot import imshow, show import cv2 import", "f, indent=4) filename = \"_object_settings\" + '.json' with open(os.path.join(config['output_test_folder'], filename), 'w') as f:", "as f: commentjson.dump(config, f) if config['dataset_type'] == 'vae': generator.get_env() dataset_pth = config['output_folder'] +", "data_test = self.init_data() data = data_test if isTestSample == True else data_train name", "\"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict) def visualize(self): #DOPE image = im for", "env.reset(random_robot=config['random_arm_movement'], random_pos=False) observation = env.get_observation() env_objects = observation[\"objects\"] for obj in env_objects: if", "renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env", "= [] for contour in contours: contour = contour.flatten().tolist() if len(contour) > 4:", "+ 1 #unique isTestSample = np.random.random_sample() < config['train_test_split_pct'] # bool, test/train data? path", "= {\"objects\":[]} return data_train, data_test def resume(self): #DOPE try: files_test = [int(x.replace('.jpg','')) for", "data_test, data_train, image_id def data_struct_image(self): #COCO \"\"\" Assign name to COCO dataset image", "str(seg['counts'], \"utf-8\") #utf-8 format in str #2 or poly segmentation format bitmap =", "bool, test/train data? 
path = config['output_test_folder'] if isTestSample == True else config['output_train_folder'] #get", "= np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask) #merge gripper links img_mask = np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge", "config['object_colors'].items(): new_value = [] for item in value: new_value.append(clr[item]) new_dict[key] = new_value config['color_dict']", "generator.get_append_annotations() #annotate and append annotations if config['visualize']: #visualize generator.visualize() #store dataset image and", "class {} with area={} in img {}'.format(self.too_small_obj, self.data_dict['area'], name)) def visualize(self): #COCO \"\"\"", "width=im.shape[1], id=image_id,)) def get_append_annotations(self): #COCO \"\"\" Make and append COCO annotations for each", "return data, name def store_image_info(self): #DOPE #write dataset image info filename = str(image_id)", "4, [0,255,0], -1) for projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4,", "observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids = [x for x in np.unique(img_mask)] #identify objects(links) in the camera", "object', mask) else: cv2.imshow('Labeled object', mask) cv2.waitKey(1000) print(self.data_dict['bbox']) def write_json_end(self): #COCO \"\"\" Write", "(dist) Data structure for testing data \"\"\" data_train = dict( images=[# file_name, height,", "seg = segmentationToCocoMask(img_mask,object_uid) area = float(mask.area(seg)) bbox = mask.toBbox(seg).flatten().tolist() #1 run length encoding", "f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4) class GeneratorVae:", "cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly = [] for contour in contours: contour = contour.flatten().tolist()", "indent=4) with 
open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4) filename = \"_object_settings\"", "-9.81) return env def episode_zero(self): self.objdim = {} while len(self.objdim.keys()) < len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'],", "= cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4,", "get the ID of last image img_ids = [img[\"id\"] for img in data_train['images']]", "dataset generation \"\"\" data = np.zeros((steps, self.imsize, self.imsize, 3), dtype='f') for t in", "[255,255,0], -1) print(class_name) cv2.imshow('image',image) cv2.waitKey(1000) self.draw_bounding_box_3D() def draw_bounding_box_3D(self): #DOPE for points in range(7):", "return data_train, data_test def create_3D_box(env_object,objdim): #DOPE objpos = env_object.get_position() objorient = env_object.get_orientation() #objdim", "bbox = mask.toBbox(seg).flatten().tolist() bounding_box = {'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc = create_3D_box(env_object,self.objdim[class_name])", "and append annotations if config['visualize']: #visualize generator.visualize() #store dataset image and info generator.store_image_info()", "mask: (array) Bitmap mask :return segmentationPoly: (list) Segmentation converted to polynoms \"\"\" contours,", "generation \"\"\" data = np.zeros((steps, self.imsize, self.imsize, 3), dtype='f') for t in range(steps):", "if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing {} and {} at episode {} of", "\"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\": bounding_box, \"cuboid\": cuboid, \"projected_cuboid\": projected_cuboid, \"box3D\": box3D, 
\"projected_3DBB\":", "segmentation format seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str #2 or poly", ":return data_test: (dist) Data structure for testing data \"\"\" data_train = dict( images=[#", "= env.action_space.sample() observation, reward, done, info = env.step(action) if t == config['num_steps']-1 or", "some steps env.render() #only render at the steps/frames we use for dataset for", "to monothically increase self.id_unique = len(data_test['annotations']) + len(data_train['annotations']) print(\"Resuming from image_id {} for", "points in range(7): #p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78, 0.47), lineWidth = 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1],", "print(\"Hard reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False) observation = env.get_observation() env_objects = observation[\"objects\"] for", "object_uid == gripper_uids[0]: class_name = env.robot.get_name() else: env_object_list = list(filter(lambda object: object.uid ==", "{}\".format(image_id, config['num_episodes'])) except FileNotFoundError: image_id = 0 return data_test, data_train, image_id def data_struct_image(self):", "os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define objects to appear in the env, add colors config['used_class_names'] =", "mask) else: cv2.imshow('Labeled object', mask) cv2.waitKey(1000) print(self.data_dict['bbox']) def write_json_end(self): #COCO \"\"\" Write json", "Initial espisode set-up \"\"\" self.id_unique = 0 #image_id*x for paralel dataset generation, otherwise", ") if self.too_small_obj == False: #area ok data['annotations'].append(self.data_dict) #append annotations self.id_unique +=1 else:", ":return data_train: (dict) Data structure for training data :return data_test: (dist) Data structure", "one of 'coco', 'dope', 
'vae'!\") #prepare directories config['output_test_folder'] = config['output_folder'] + '/test' config['output_train_folder']", "for training data :return data_test: (dist) Data structure for testing data \"\"\" data_train,", "in data[\"objects\"][-1][\"projected_3DBB\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image),", "== 0: # generator.episode_zero() if episode % config['num_episodes_hard_reset'] == 0: #to prevent objects", "img_mask==object_uid mask = np.expand_dims(mask, axis=2) mask = 255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1) print(class_name) if self.too_small_obj:", "structure Returns: :return data_train: (dict) Data structure for training data :return data_test: (dist)", "#random_pos randomizes the init positions of objects # if episode == 0: #", "on print(\"Hard reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False) observation = env.get_observation() env_objects = observation[\"objects\"]", "with open(os.path.join(path, filename), 'w') as f: json.dump(data, f, indent=4) def get_append_annotations(self): #DOPE cuboid_with_centroid", "= config['output_train_folder'] data = data_train else: folder = config['output_test_folder'] data = data_test json_name", "dataset image and info generator.store_image_info() if config['make_dataset'] in [\"new\", \"resume\"]: cv2.imwrite(os.path.join(path, name), im,", "changing_light_gui = config['changing_light_gui'], shadows_on = config['shadows_on'], color_dict = config['color_dict'], object_sampling_area = config['object_sampling_area'], num_objects_range", "'w') as f: json.dump(self.camera, f, indent=4) filename = \"_object_settings\" + '.json' with open(os.path.join(config['output_test_folder'],", "'/test' config['output_train_folder'] = config['output_folder'] + '/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True) 
os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define objects to", "= len(data_test['annotations']) + len(data_train['annotations']) print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) except", "after {} timesteps\".format(t+1)) break # write JSON annotations every n periods or at", "import BBox3D from myGym.envs.wrappers import RandomizedEnvWrapper import pkg_resources # config, specify here or", "range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through episodes print(\"episode: {}/{}\".format(episode, config['num_episodes'])) #env reset #random_robot randomizes the", "iscrowd, image_id, bbox, category_id, id ], categories = _category_coco_format(), ) data_test = dict(", "+1 if last sample were test (thus not in here). it's safe to", "the main loop for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through episodes print(\"episode: {}/{}\".format(episode,", "numpy as np import os import glob import json import commentjson import sys", "new_value config['color_dict'] = new_dict def _category_coco_format(): #COCO \"\"\" Create list of dictionaries with", "info filename = str(image_id) + '.json' if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing {}", "if self.too_small_obj == False: #area ok data['annotations'].append(self.data_dict) #append annotations self.id_unique +=1 else: #area", "cv2.imshow('image',image) cv2.waitKey(1000) self.draw_bounding_box_3D() def draw_bounding_box_3D(self): #DOPE for points in range(7): #p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31,", "object in the scene \"\"\" seg = segmentationToCocoMask(img_mask,object_uid) area = float(mask.area(seg)) bbox =", "np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge robot links obj_ids = [x for x in np.unique(img_mask)]", "with 
open(config['output_folder']+'/config_dataset.json', 'w') as f: commentjson.dump(config, f) if config['dataset_type'] == 'vae': generator.get_env() dataset_pth", "\"r\") as read_file: clr = json.load(read_file) #json file with suggested colors new_dict =", "ids, just need to monothically increase self.id_unique = len(data_test['annotations']) + len(data_train['annotations']) print(\"Resuming from", "< config['train_test_split_pct'] # bool, test/train data? path = config['output_test_folder'] if isTestSample == True", "env_objects)) if len(env_object_list) > 0: env_object = env_object_list[0] class_name = env_object.get_name() else: continue", "frames from some steps env.render() #only render at the steps/frames we use for", "model training \"\"\" def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} self.env =", "store_image_info(self): #COCO \"\"\" Append COCO dataset image info to corresponding data dict \"\"\"", "= segmentationToCocoMask(img_mask, object_uid) seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str bbox =", "generator.get_env() first_link_uid = env.robot.robot_uid robot_uids = np.array([((x + 1) << 24) + first_link_uid", "4: segmentationPoly.append(contour) return segmentationPoly def create_coco_json(): #COCO \"\"\" Create COCO json data structure", "= cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids = [x for x", "image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [0,255,0], -1) for projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]: image", "[]} for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id)", "for obj in env_objects: if obj.name not in self.objdim.keys(): self.objdim[obj.name] = 
obj.get_cuboid_dimensions() def", "self.too_small_obj == False: #area ok data['annotations'].append(self.data_dict) #append annotations self.id_unique +=1 else: #area too", "in np.unique(img_mask)] #identify merged objects in the camera view (in the image) #prepare", "== \"__main__\": if len(sys.argv) <= 1: config_path = CONFIG_DEFAULT print('No config.json passed in", "#identify objects(links) in the camera view (in the image) img_mask = np.where(np.isin(img_mask,gripper_uids), gripper_uids[0],", "supported format at the moment def get_env(self): \"\"\" Create environment for VAE dataset", "generator if config['dataset_type'] == 'coco': generator = GeneratorCoco() elif config['dataset_type'] == 'dope': generator", "= config['output_folder'] + '/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True) os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define objects to appear in", "hard=False) observation = env.get_observation() env_objects = observation[\"objects\"] for t in range(config['num_steps']): #loop through", "== True else data_train name = 'img_{}_cam{}.jpg'.format(image_id,camera_id) return data, name def store_image_info(self): #COCO", "for dataset generation \"\"\" env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True,", "finished after {} timesteps\".format(t+1)) break # write JSON annotations every n periods or", "#loop through kuka and used objects in the image (in the camera view)", "env def episode_zero(self): \"\"\" Initial espisode set-up \"\"\" self.id_unique = 0 #image_id*x for", "} data[\"objects\"].append(self.data_dict) def visualize(self): #DOPE image = im for projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]: image", "through episodes print(\"episode: {}/{}\".format(episode, config['num_episodes'])) #env reset #random_robot randomizes the init position of", "the camera view (in the image) #prepare data strucuture data, name = 
generator.data_struct_image()", "\"\"\" seg = segmentationToCocoMask(img_mask,object_uid) area = float(mask.area(seg)) bbox = mask.toBbox(seg).flatten().tolist() #1 run length", "env.robot.robot_uid robot_uids = np.array([((x + 1) << 24) + first_link_uid for x in", "tuple(map(int, [data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0], -1)", "self.too_small_obj: cv2.imshow('Too small object', mask) else: cv2.imshow('Labeled object', mask) cv2.waitKey(1000) print(self.data_dict['bbox']) def write_json_end(self):", "{} while len(self.objdim.keys()) < len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], random_pos=False) observation = env.get_observation() env_objects = observation[\"objects\"]", "= config['camera_resolution'], dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) def", "raise SystemExit(0) # initialize pybullet env env = generator.get_env() first_link_uid = env.robot.robot_uid robot_uids", "mask.decode(seg) seg = _segmentationToPoly(bitmap) self.too_small_obj = False try: #notify and skip the object", "observation[\"objects\"] for obj in env_objects: if obj.name not in self.objdim.keys(): self.objdim[obj.name] = obj.get_cuboid_dimensions()", "in [\"new\", \"resume\"]: print(\"Storing {} and {} at episode {} of {}.\".format(filename, name,", "config['output_folder'] + '/train' generator.collect_data(config['num_episodes']) print(\"Dataset finished. 
Ready to train!\") raise SystemExit(0) # initialize", "if episode == 0: # generator.episode_zero() if episode % config['num_episodes_hard_reset'] == 0: #to", "'{}.jpg'.format(image_id) return data, name def store_image_info(self): #DOPE #write dataset image info filename =", "envs from matplotlib.pyplot import imshow, show import cv2 import numpy as np import", "4, [255,255,0], -1) print(class_name) cv2.imshow('image',image) cv2.waitKey(1000) self.draw_bounding_box_3D() def draw_bounding_box_3D(self): #DOPE for points in", "to corresponding data dict \"\"\" data['images'].append(dict( file_name=name, height=im.shape[0], width=im.shape[1], id=image_id,)) def get_append_annotations(self): #COCO", "print(class_name) cv2.imshow('image',image) cv2.waitKey(1000) self.draw_bounding_box_3D() def draw_bounding_box_3D(self): #DOPE for points in range(7): #p.addUserDebugLine([0,0,0], [1,2,3],", "for t in range(steps): self.env.reset(random_pos=True) self.env.render() action = [random.uniform(1,2) for x in range(6)]", "input argument CONFIG_DEFAULT = pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') # helper functions: def color_names_to_rgb(): \"\"\" Assign", "= dict([x[1:3] for x in config['used_class_names_quantity']]) used_objects = [] for x in config['used_class_names_quantity']:", "need to monothically increase self.id_unique = len(data_test['annotations']) + len(data_train['annotations']) print(\"Resuming from image_id {}", "env_objects = observation[\"objects\"] for t in range(config['num_steps']): #loop through steps # randomize the", "def episode_zero(self): \"\"\" Initial espisode set-up \"\"\" self.id_unique = 0 #image_id*x for paralel", "ID of last generated image in preceding dataset generation \"\"\" try: with open(os.path.join(config['output_test_folder'],'annotations.json'),", "continue data[\"images\"].clear() data[\"annotations\"].clear() class GeneratorDope: #DOPE def __init__(self): self.object_settings = {\"exported_object_classes\": 
[], \"exported_objects\":", "self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} def get_env(self): #DOPE env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot", "suggested colors new_dict = {} for key, value in config['object_colors'].items(): new_value = []", "class GeneratorVae: \"\"\" Generator class for image dataset for VAE vision model training", "cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0],", "cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\": bounding_box, \"cuboid\": cuboid, \"projected_cuboid\": projected_cuboid, \"box3D\": box3D, \"projected_3DBB\": projected_3DBB,", "import pybullet as p from bbox import BBox3D from myGym.envs.wrappers import RandomizedEnvWrapper import", "print('Too small object of class {} with area={} in img {}'.format(self.too_small_obj, self.data_dict['area'], name))", "def store_image_info(self): #DOPE #write dataset image info filename = str(image_id) + '.json' if", "env.action_space.sample() observation, reward, done, info = env.step(action) if t == config['num_steps']-1 or t%config['make_shot_every_frame']", "tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, [255,255,0], -1) print(class_name) cv2.imshow('image',image) cv2.waitKey(1000) self.draw_bounding_box_3D() def draw_bounding_box_3D(self): #DOPE for", "in box3D] if class_name not in self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({ \"class\": class_name, 
\"segmentation_class_id\": class_id,", "config['color_dict'] == 'object_colors': color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\") #write config.json to output_folder with open(config['output_folder']+'/config_dataset.json', 'w')", "control) action = env.action_space.sample() observation, reward, done, info = env.step(action) if t ==", "the image (in the camera view) if object_uid == robot_uids[0]: class_name = config['robot']", "dataset generation \"\"\" try: with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f: data_test = json.load(f) except:", "when test JSON is empty, which can happen for small numbers try: with", "position of robots #random_pos randomizes the init positions of objects # if episode", "#COCO \"\"\" Resume COCO dataset generation Returns: :return data_train: (dict) Training data from", "box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center class GeneratorCoco: #COCO \"\"\" Generator class for COCO image", "list(boxc) projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id)) for point in cuboid]", "end if episode % config['autosafe_episode'] == 0 or episode == config['num_episodes']-1: generator.write_json_end() data_train,", "Loading default config: {}'.format(CONFIG_DEFAULT)) else: config_path = pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with open(config_path) as file:", "data :return data_test: (dist) Data structure for testing data \"\"\" data_train, data_test =", "for COCO dataset annotations Returns: :return data_train: (dict) Data structure for training data", "of test status Returns: :param data: (dict) Corresponding data dictionary :param name: (string)", "or pass as an input argument CONFIG_DEFAULT = 
pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') # helper functions:", "= list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id)) for point in cuboid] projected_3DBB_centroid =", "cv2.COLOR_RGB2BGR) img = cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize)) cv2.imshow(\"image\", img) cv2.waitKey(1) padding = 6 -", "0.0, 0.99), lineWidth = 4, lifeTime = 1) def write_json_end(self): #DOPE self.camera =", "image_id = 0 return data_test, data_train, image_id def data_struct_image(self): #COCO \"\"\" Assign name", "self.too_small_obj = inv_map[class_id] self.data_dict = dict( id=self.id_unique, image_id=image_id, category_id=class_id, segmentation=seg, area=area, bbox=bbox, iscrowd=0,", "camera_id) json_dict = {\"images\": data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()} if len(data[\"images\"]) > 0: with", "files_train = [int(x.replace('.jpg','')) for x in os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")] image_id = max(max(files_test),max(files_train)) print(\"Resuming", "{'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc = create_3D_box(env_object,self.objdim[class_name]) box3D = [] for x", "= \"_camera_settings\" + '.json' print(\"Storing {}.\".format(filename)) with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.camera,", "COCO data structure :return data_test: (dist) Testing data from preceding dataset generation in", "= config['robot'], render_on = True, gui_on = config['gui_on'], show_bounding_boxes_gui = config['show_bounding_boxes_gui'], changing_light_gui =", "dataset image info filename = str(image_id) + '.json' if config['make_dataset'] in [\"new\", \"resume\"]:", "pycocotools import mask import pybullet as p from bbox import BBox3D from myGym.envs.wrappers", "#DOPE for points in 
range(7): #p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78, 0.47), lineWidth = 10)", "data = np.zeros((steps, self.imsize, self.imsize, 3), dtype='f') for t in range(steps): self.env.reset(random_pos=True) self.env.render()", "of robots #random_pos randomizes the init positions of objects # if episode ==", "isTestSample == True else data_train name = '{}.jpg'.format(image_id) return data, name def store_image_info(self):", "\"\"\" self.id_unique = 0 #image_id*x for paralel dataset generation, otherwise 0 def init_data(self):", "write_json_end(self): #DOPE self.camera = {\"camera_settings\": []} for c in range(np.count_nonzero(config['active_cameras'])): #loop through active", "config['output_folder'] + '/test' config['output_train_folder'] = config['output_folder'] + '/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True) os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define", "os.remove(f) files = glob.glob(os.path.join(config['output_train_folder'],'./*')) for f in files: os.remove(f) data_train, data_test = generator.init_data()", "object_uid, env_objects)) if len(env_object_list) > 0: env_object = env_object_list[0] class_name = env_object.get_name() else:", "be realistically seen print('Too small object of class {} with area={} in img", "== 'resume': #resume print(\"Restoring dataset generation\") data_test, data_train, image_id = generator.resume() elif (config['make_dataset']", "assert(area > config['min_obj_area']) assert(len(seg)>0 and len(seg[0])>0) except: #make inverse map id->name (just to", "= cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly = [] for contour in contours: contour =", "(dict) Training data from preceding dataset generation in COCO data structure :return data_test:", "dict( id=self.id_unique, image_id=image_id, category_id=class_id, segmentation=seg, area=area, bbox=bbox, iscrowd=0, ) if self.too_small_obj == False:", "im, 
[int(cv2.IMWRITE_JPEG_QUALITY), 70]) if done: print(\"Episode finished after {} timesteps\".format(t+1)) break # write", "in range(steps): self.env.reset(random_pos=True) self.env.render() action = [random.uniform(1,2) for x in range(6)] #action =", "env_object = env_object_list[0] class_name = env_object.get_name() else: continue if class_name in config['used_class_names']: class_id", "x in np.unique(img_mask)] #identify merged objects in the camera view (in the image)", "generation >0, otherwise 0 if config['make_dataset'] == \"new\": #cleanup files files = glob.glob(os.path.join(config['output_test_folder'],'./*'))", "__name__ == \"__main__\": if len(sys.argv) <= 1: config_path = CONFIG_DEFAULT print('No config.json passed", "data_test = generator.init_data() generator.episode_zero() elif config['make_dataset'] == 'resume': #resume print(\"Restoring dataset generation\") data_test,", "#1 run length encoding RLE segmentation format seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format", "\"\"\" data = np.zeros((steps, self.imsize, self.imsize, 3), dtype='f') for t in range(steps): self.env.reset(random_pos=True)", "= list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB = [list(env.project_point_to_camera_image(point, camera_id)) for point in box3D] if class_name", "config file \"\"\" with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\") as read_file: clr = json.load(read_file) #json", "of image file for saving \"\"\" data = data_test if isTestSample == True", "gripper_uids[0]: class_name = env.robot.get_name() else: env_object_list = list(filter(lambda object: object.uid == object_uid, env_objects))", "Create COCO json data structure Returns: :return data_train: (dict) Data structure for training", "lineWidth = 4, lifeTime = 1) def write_json_end(self): #DOPE self.camera = {\"camera_settings\": []}", "Ready to train!\") raise SystemExit(0) # initialize pybullet env env = generator.get_env() 
first_link_uid", "segmentationPoly.append(contour) return segmentationPoly def create_coco_json(): #COCO \"\"\" Create COCO json data structure Returns:", "if config['make_dataset'] in [\"new\", \"resume\"]: cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) if done: print(\"Episode", "< len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], random_pos=False) observation = env.get_observation() env_objects = observation[\"objects\"] for obj in", "create_3D_box(env_object,self.objdim[class_name]) box3D = [] for x in range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc = list(boxc) projected_cuboid_centroid", "and skip the object with too small visible representation assert(area > config['min_obj_area']) assert(len(seg)>0", "in argument. Loading default config: {}'.format(CONFIG_DEFAULT)) else: config_path = pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with open(config_path)", "script to generate train/test sets from the simulator in COCO or DOPE format.", "import imshow, show import cv2 import numpy as np import os import glob", "config['used_class_names'] = dict([x[1:3] for x in config['used_class_names_quantity']]) used_objects = [] for x in", "if done: print(\"Episode finished after {} timesteps\".format(t+1)) break # write JSON annotations every", "= self.init_data() data = data_test if isTestSample == True else data_train name =", "files files = glob.glob(os.path.join(config['output_test_folder'],'./*')) for f in files: os.remove(f) files = glob.glob(os.path.join(config['output_train_folder'],'./*')) for", "def create_3D_box(env_object,objdim): #DOPE objpos = env_object.get_position() objorient = env_object.get_orientation() #objdim = env_object.get_cuboid_dimensions() box=", "\"exported_objects\": []} def get_env(self): #DOPE env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on =", "used_objects, active_cameras = config['active_cameras'], 
camera_resolution = config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset = True, ), config_path", "generate train/test sets from the simulator in COCO or DOPE format. Used for", "in range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc = list(boxc) projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid = [list(env.project_point_to_camera_image(point,", "> 0: with open(os.path.join(folder,json_name), 'w') as f: json.dump(json_dict, f, indent=4) # clear data", "generator.episode_zero() # the main loop for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through episodes", "with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w')", "for COCO dataset generation according to dataset config file Returns: :return env: (object)", "x.endswith(\".jpg\")] image_id = max(max(files_test),max(files_train)) print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) self.episode_zero()", "image_id = 0 #for paralel dataset generation >0, otherwise 0 if config['make_dataset'] ==", "= [list(env.project_point_to_camera_image(point, camera_id)) for point in box3D] if class_name not in self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name)", "data_train, image_id = generator.resume() elif (config['make_dataset'] == \"display\"): data_train, data_test = generator.init_data() generator.episode_zero()", ":param data: (dict) Corresponding data dictionary :param name: (string) Name of image file", "class GeneratorDope: #DOPE def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} def get_env(self):", "cv2.circle(cv2.UMat(image), tuple(map(int, 
[data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0],", "were test (thus not in here). it's safe to have holes in the", "= [] for value, key in config['used_class_names'].items(): categories.append({\"id\": int(key), \"name\": str(value)}) return categories", "= generator.init_data() generator.episode_zero() # the main loop for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop", "with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f: data_test = json.load(f) except: pass #happens when test", "-9.81) def collect_data(self, steps): \"\"\" Collect data for VAE dataset Parameters: :param steps:", "obj.name not in self.objdim.keys(): self.objdim[obj.name] = obj.get_cuboid_dimensions() def init_data(self): #DOPE data_train = {\"objects\":[]}", "= padding * \"0\" + str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img) data[t] = img print(\"Image", "\"__main__\": if len(sys.argv) <= 1: config_path = CONFIG_DEFAULT print('No config.json passed in argument.", "4, [255,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, [255,255,0], -1) print(class_name) cv2.imshow('image',image)", "os.makedirs(config[\"output_test_folder\"], exist_ok=True) os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define objects to appear in the env, add colors", "cv2.imshow('image',im) cv2.waitKey(1) print(class_name) if self.too_small_obj: cv2.imshow('Too small object', mask) else: cv2.imshow('Labeled object', mask)", "cuboid, \"projected_cuboid\": 
projected_cuboid, \"box3D\": box3D, \"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict) def visualize(self):", "[\"new\", \"resume\"]: filename = \"_camera_settings\" + '.json' print(\"Storing {}.\".format(filename)) with open(os.path.join(config['output_test_folder'], filename), 'w')", "segmentationToCocoMask(img_mask,object_uid) area = float(mask.area(seg)) bbox = mask.toBbox(seg).flatten().tolist() #1 run length encoding RLE segmentation", ":param steps: (int) Number of episodes initiated during dataset generation \"\"\" data =", "not in self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({ \"class\": class_name, \"segmentation_class_id\": class_id, \"cuboid_dimensions\": self.objdim[class_name] }) self.data_dict", "objects by name as specified in the training config file \"\"\" with open(pkg_resources.resource_filename(\"myGym\",", "cv2.waitKey(1000) self.draw_bounding_box_3D() def draw_bounding_box_3D(self): #DOPE for points in range(7): #p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78,", "f, indent=4) def get_append_annotations(self): #DOPE cuboid_with_centroid = env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg = segmentationToCocoMask(img_mask,", "+ '.json' if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing {} and {} at episode", "= observation[\"objects\"] for obj in env_objects: if obj.name not in self.objdim.keys(): self.objdim[obj.name] =", "#DOPE #write dataset image info filename = str(image_id) + '.json' if config['make_dataset'] in", "dataset for VAE vision model training \"\"\" def __init__(self): self.object_settings = {\"exported_object_classes\": [],", "= config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) def collect_data(self, steps): \"\"\" 
Collect data for VAE", "steps)) self.env.close() # main if __name__ == \"__main__\": if len(sys.argv) <= 1: config_path", "#loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id) captured_image_size = {\"width\": config['camera_resolution'][0], \"height\":", "test status Returns: :param data: (dict) Corresponding data dictionary :param name: (string) Name", "an input argument CONFIG_DEFAULT = pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') # helper functions: def color_names_to_rgb(): \"\"\"", "projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB = [list(env.project_point_to_camera_image(point, camera_id)) for point in box3D] if", "get_env(self): \"\"\" Create environment for VAE dataset generation according to dataset config file", "cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids = [x for x in", "preceding dataset generation in COCO data structure :return image_id: (int) ID of last", "[] for x in range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc = list(boxc) projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id))", "glob.glob(os.path.join(config['output_train_folder'],'./*')) for f in files: os.remove(f) data_train, data_test = generator.init_data() generator.episode_zero() elif config['make_dataset']", "and continue data[\"images\"].clear() data[\"annotations\"].clear() class GeneratorDope: #DOPE def __init__(self): self.object_settings = {\"exported_object_classes\": [],", "of {}.\".format(episode, config['num_episodes'])) for flag in ['test','train']: if flag == 'train': folder =", "Training data from preceding dataset generation in COCO data structure :return data_test: (dist)", "= 'img_{}_cam{}.jpg'.format(image_id,camera_id) return data, name def store_image_info(self): 
#COCO \"\"\" Append COCO dataset image", "#DOPE try: files_test = [int(x.replace('.jpg','')) for x in os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")] files_train =", "<< 24) + first_link_uid for x in range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids = np.array([((x +", "view) if object_uid == robot_uids[0]: class_name = config['robot'] elif object_uid == gripper_uids[0]: class_name", "indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4) class GeneratorVae: \"\"\"", "[255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0], -1) image = cv2.circle(cv2.UMat(image),", "generator = GeneratorCoco() elif config['dataset_type'] == 'dope': generator = GeneratorDope() elif config['dataset_type'] ==", "config['output_test_folder'] = config['output_folder'] + '/test' config['output_train_folder'] = config['output_folder'] + '/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True) os.makedirs(config[\"output_train_folder\"],", "annotations if config['visualize']: #visualize generator.visualize() #store dataset image and info generator.store_image_info() if config['make_dataset']", "COCO dataset image info to corresponding data dict \"\"\" data['images'].append(dict( file_name=name, height=im.shape[0], width=im.shape[1],", "(int) Number of episodes initiated during dataset generation \"\"\" data = np.zeros((steps, self.imsize,", "data strucuture data, name = generator.data_struct_image() for object_uid in obj_ids: #loop through kuka", "for x in np.unique(img_mask)] #identify merged objects in the camera view (in the", ":return env: (object) Environment for dataset generation \"\"\" env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot =", "data_train['images']] image_id = max(img_ids) +1 # +1 if last sample 
were test (thus", "'.json' if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing {} and {} at episode {}", "to appear in the env, add colors config['used_class_names'] = dict([x[1:3] for x in", "24) + first_link_uid for x in range(env.robot.gripper_index, env.robot.num_joints + 1)]) # check mode", "print) inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj = inv_map[class_id] self.data_dict = dict( id=self.id_unique, image_id=image_id,", "in str bbox = mask.toBbox(seg).flatten().tolist() bounding_box = {'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc", "open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\") as read_file: clr = json.load(read_file) #json file with suggested colors", "\"\"\" data = data_test if isTestSample == True else data_train name = 'img_{}_cam{}.jpg'.format(image_id,camera_id)", "episode {} of {}.\".format(filename, name, episode, config['num_episodes'])) with open(os.path.join(path, filename), 'w') as f:", "as f: data_test = json.load(f) except: pass #happens when test JSON is empty,", "False try: #notify and skip the object with too small visible representation assert(area", "img) cv2.waitKey(1) padding = 6 - len(str(t+7999)) name = padding * \"0\" +", "image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) self.episode_zero() except FileNotFoundError: image_id = 0 return", "categories = _category_coco_format(), ) data_test = dict( images=[# file_name, height, width, id ],", "for x in range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids = np.array([((x + 1) << 24) +", "through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id = image_id + 1 #unique isTestSample = np.random.random_sample()", "the moment def get_env(self): \"\"\" Create environment for VAE dataset generation according to", "of dictionaries with category id-name pairs in MSCOCO format 
Returns: :return categories: (list)", "config['color_randomizer']['exclude'].append(\"objects\") #write config.json to output_folder with open(config['output_folder']+'/config_dataset.json', 'w') as f: commentjson.dump(config, f) if", "segmentationPoly = [] for contour in contours: contour = contour.flatten().tolist() if len(contour) >", "first_link_uid for x in range(env.robot.gripper_index, env.robot.num_joints + 1)]) # check mode of writing", "height=im.shape[0], width=im.shape[1], id=image_id,)) def get_append_annotations(self): #COCO \"\"\" Make and append COCO annotations for", "is on print(\"Hard reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False) observation = env.get_observation() env_objects =", "or poly segmentation format bitmap = mask.decode(seg) seg = _segmentationToPoly(bitmap) self.too_small_obj = False", "data[\"objects\"].append(self.data_dict) def visualize(self): #DOPE image = im for projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]: image =", "% config['num_episodes_hard_reset'] == 0: #to prevent objects vanishing when GUI is on print(\"Hard", "if self.too_small_obj: cv2.imshow('Too small object', mask) else: cv2.imshow('Labeled object', mask) cv2.waitKey(1000) print(self.data_dict['bbox']) def", "'w') as f: json.dump(self.camera, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.camera,", "pass #happens when test JSON is empty, which can happen for small numbers", "= \"_object_settings\" + '.json' with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4)", "the steps/frames we use for dataset for c in range(np.count_nonzero(config['active_cameras'])): #loop through active", "= json.load(f) # get the ID of last image img_ids = [img[\"id\"] for", "'img_{}_cam{}.jpg'.format(image_id,camera_id) return data, name def store_image_info(self): #COCO 
\"\"\" Append COCO dataset image info", "prevent objects vanishing when GUI is on print(\"Hard reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False)", "in COCO data structure :return image_id: (int) ID of last generated image in", "#objdim = env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center class GeneratorCoco: #COCO \"\"\" Generator class", "{} for episodes: {}\".format(image_id, config['num_episodes'])) except FileNotFoundError: image_id = 0 return data_test, data_train,", "for object_uid in obj_ids: #loop through kuka and used objects in the image", "config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing annotations.json at episode {} of {}.\".format(episode, config['num_episodes'])) for", "\"\"\" Create environment for COCO dataset generation according to dataset config file Returns:", "at the steps/frames we use for dataset for c in range(np.count_nonzero(config['active_cameras'])): #loop through", "+ 1) << 24) + first_link_uid for x in range(env.robot.gripper_index, env.robot.num_joints + 1)])", "color_dict = config['color_dict'], object_sampling_area = config['object_sampling_area'], num_objects_range = config['num_objects_range'], used_objects = used_objects, active_cameras", "img {}'.format(self.too_small_obj, self.data_dict['area'], name)) def visualize(self): #COCO \"\"\" Visualize mask and bounding box", "def store_image_info(self): #COCO \"\"\" Append COCO dataset image info to corresponding data dict", "isTestSample = np.random.random_sample() < config['train_test_split_pct'] # bool, test/train data? path = config['output_test_folder'] if", "test (thus not in here). 
it's safe to have holes in the ids,", "image) #prepare data strucuture data, name = generator.data_struct_image() for object_uid in obj_ids: #loop", "env_object.get_orientation() #objdim = env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center class GeneratorCoco: #COCO \"\"\" Generator", "env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False) observation = env.get_observation() env_objects = observation[\"objects\"] for t in range(config['num_steps']):", "dataset for YOLACT vision model training \"\"\" def __init__(self): pass def get_env(self): #COCO", "segmentationPoly def create_coco_json(): #COCO \"\"\" Create COCO json data structure Returns: :return data_train:", "Assign RGB colors to objects by name as specified in the training config", "config['used_class_names_quantity']: for _ in range(x[0]): used_objects.append(x[1]) if config['color_dict'] == 'object_colors': color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\")", "camera view (in the image) img_mask = np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask) #merge gripper links", "self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} self.env = None self.imsize = config[\"imsize\"] #", "for points in range(7): #p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78, 0.47), lineWidth = 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001)", "list of dictionaries with category id-name pairs in MSCOCO format Returns: :return categories:", "files: os.remove(f) data_train, data_test = generator.init_data() generator.episode_zero() elif config['make_dataset'] == 'resume': #resume print(\"Restoring", "= commentjson.load(file) # initialize dataset generator if config['dataset_type'] == 'coco': generator = 
GeneratorCoco()", "= json.load(read_file) #json file with suggested colors new_dict = {} for key, value", "'w') as f: json.dump(self.object_settings, f, indent=4) class GeneratorVae: \"\"\" Generator class for image", "if x.endswith(\".jpg\")] image_id = max(max(files_test),max(files_train)) print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes']))", "## script to generate train/test sets from the simulator in COCO or DOPE", "self.objdim = {} while len(self.objdim.keys()) < len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], random_pos=False) observation = env.get_observation() env_objects", "strucuture data, name = generator.data_struct_image() for object_uid in obj_ids: #loop through kuka and", "reset #random_robot randomizes the init position of robots #random_pos randomizes the init positions", "DOPE format. Used for vision training. import gym from myGym import envs from", "camera_id=np.nonzero(config['active_cameras'])[0][c] image_id = image_id + 1 #unique isTestSample = np.random.random_sample() < config['train_test_split_pct'] #", "config['used_class_names'].keys())) self.too_small_obj = inv_map[class_id] self.data_dict = dict( id=self.id_unique, image_id=image_id, category_id=class_id, segmentation=seg, area=area, bbox=bbox,", ")) if config['make_dataset'] in [\"new\", \"resume\"]: filename = \"_camera_settings\" + '.json' print(\"Storing {}.\".format(filename))", "os import glob import json import commentjson import sys import random from pycocotools.cocostuffhelper", "loop for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through episodes print(\"episode: {}/{}\".format(episode, config['num_episodes'])) #env", "id=self.id_unique, image_id=image_id, category_id=class_id, segmentation=seg, area=area, bbox=bbox, iscrowd=0, ) if self.too_small_obj == False: #area", "= 
mask.toBbox(seg).flatten().tolist() bounding_box = {'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc = create_3D_box(env_object,self.objdim[class_name]) box3D", "== config['num_steps']-1 or t%config['make_shot_every_frame'] == 0: # we only use frames from some", "True else data_train name = 'img_{}_cam{}.jpg'.format(image_id,camera_id) return data, name def store_image_info(self): #COCO \"\"\"", "\"0\" + str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img) data[t] = img print(\"Image {}/{}\".format(t, steps)) self.env.close()", "'object_colors': color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\") #write config.json to output_folder with open(config['output_folder']+'/config_dataset.json', 'w') as f:", "vision model training \"\"\" def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} self.env", "main if __name__ == \"__main__\": if len(sys.argv) <= 1: config_path = CONFIG_DEFAULT print('No", "randomizes the init positions of objects # if episode == 0: # generator.episode_zero()", "categories.append({\"id\": int(key), \"name\": str(value)}) return categories def _segmentationToPoly(mask, ): \"\"\" Convert segmentation from", "import segmentationToCocoMask, segmentationToCocoResult from pycocotools import mask import pybullet as p from bbox", "train/test sets from the simulator in COCO or DOPE format. Used for vision", "in the ids, just need to monothically increase self.id_unique = len(data_test['annotations']) + len(data_train['annotations'])", "data_test: (dist) Data structure for testing data \"\"\" data_train = dict( images=[# file_name,", "'vae': generator.get_env() dataset_pth = config['output_folder'] + '/train' generator.collect_data(config['num_episodes']) print(\"Dataset finished. 
Ready to train!\")", "p.setGravity(0, 0, -9.81) return env def episode_zero(self): \"\"\" Initial espisode set-up \"\"\" self.id_unique", "in files: os.remove(f) files = glob.glob(os.path.join(config['output_train_folder'],'./*')) for f in files: os.remove(f) data_train, data_test", "+ '/train' generator.collect_data(config['num_episodes']) print(\"Dataset finished. Ready to train!\") raise SystemExit(0) # initialize pybullet", "0: with open(os.path.join(folder,json_name), 'w') as f: json.dump(json_dict, f, indent=4) # clear data and", "= new_dict def _category_coco_format(): #COCO \"\"\" Create list of dictionaries with category id-name", "cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id) captured_image_size = {\"width\": config['camera_resolution'][0], \"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( name=\"camera\"", "== True else config['output_train_folder'] #get dataset image and its mask im = observation[\"camera_data\"][camera_id][\"image\"]", "used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset = True,", "config['robot'], render_on = True, gui_on = config['gui_on'], show_bounding_boxes_gui = config['show_bounding_boxes_gui'], changing_light_gui = config['changing_light_gui'],", "'w') as f: json.dump(json_dict, f, indent=4) # clear data and continue data[\"images\"].clear() data[\"annotations\"].clear()", "= 1) def write_json_end(self): #DOPE self.camera = {\"camera_settings\": []} for c in range(np.count_nonzero(config['active_cameras'])):", "image dataset for VAE vision model training \"\"\" def __init__(self): self.object_settings = {\"exported_object_classes\":", "color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\") #write config.json to 
output_folder with open(config['output_folder']+'/config_dataset.json', 'w') as f: commentjson.dump(config,", "= config['active_cameras'], camera_resolution = config['camera_resolution'], dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0,", "= { \"class\": class_name, \"class_id\": class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid,", "init position of robots #random_pos randomizes the init positions of objects # if", "data_train = {\"objects\":[]} data_test = {\"objects\":[]} return data_train, data_test def resume(self): #DOPE try:", "in MSCOCO format \"\"\" categories = [] for value, key in config['used_class_names'].items(): categories.append({\"id\":", "self.id_unique +=1 else: #area too small to be realistically seen print('Too small object", "iscrowd=0, ) if self.too_small_obj == False: #area ok data['annotations'].append(self.data_dict) #append annotations self.id_unique +=1", "done, info = env.step(action) if t == config['num_steps']-1 or t%config['make_shot_every_frame'] == 0: #", "\"bounding_box\": bounding_box, \"cuboid\": cuboid, \"projected_cuboid\": projected_cuboid, \"box3D\": box3D, \"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid, }", "[bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc = create_3D_box(env_object,self.objdim[class_name]) box3D = [] for x in range(boxp.shape[0]): box3D.append(tuple(boxp[x]))", "segmentationToCocoResult from pycocotools import mask import pybullet as p from bbox import BBox3D", "generation according to dataset config file Returns: :return env: (object) Environment for dataset", "pybullet as p from bbox import BBox3D from myGym.envs.wrappers import RandomizedEnvWrapper import pkg_resources", "#random_robot randomizes the init position of robots #random_pos randomizes the init positions of", 
"filename), 'w') as f: json.dump(self.camera, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f:", "in [\"new\", \"resume\"]: filename = \"_camera_settings\" + '.json' print(\"Storing {}.\".format(filename)) with open(os.path.join(config['output_test_folder'], filename),", "ok data['annotations'].append(self.data_dict) #append annotations self.id_unique +=1 else: #area too small to be realistically", "#happens when test JSON is empty, which can happen for small numbers try:", "observation[\"camera_data\"][camera_id][\"image\"] im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors img_mask = observation[\"camera_data\"][camera_id][\"segmentation_mask\"] obj_ids = [x", "[x for x in np.unique(img_mask)] #identify objects(links) in the camera view (in the", "-1) print(class_name) cv2.imshow('image',image) cv2.waitKey(1000) self.draw_bounding_box_3D() def draw_bounding_box_3D(self): #DOPE for points in range(7): #p.addUserDebugLine([0,0,0],", "#COCO \"\"\" Append COCO dataset image info to corresponding data dict \"\"\" data['images'].append(dict(", "YOLACT vision model training \"\"\" def __init__(self): pass def get_env(self): #COCO \"\"\" Create", "\"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()} if len(data[\"images\"]) > 0: with open(os.path.join(folder,json_name), 'w') as f: json.dump(json_dict,", "np import os import glob import json import commentjson import sys import random", "dict( images=[# file_name, height, width, id ], type='instances', annotations=[# segmentation, area, iscrowd, image_id,", "files = glob.glob(os.path.join(config['output_train_folder'],'./*')) for f in files: os.remove(f) data_train, data_test = generator.init_data() generator.episode_zero()", "polynoms \"\"\" contours, _ = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly = [] for contour", "(in the image) img_mask = 
np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask) #merge gripper links img_mask =", "in data_train['images']] image_id = max(img_ids) +1 # +1 if last sample were test", "pretty print) inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj = inv_map[class_id] self.data_dict = dict( id=self.id_unique,", "= {\"width\": config['camera_resolution'][0], \"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( name=\"camera\" + str(camera_id), intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size, )) if", "range(config['num_steps']): #loop through steps # randomize the movements of robots (using joint control)", "data_struct_image(self): #DOPE data_train, data_test = self.init_data() data = data_test if isTestSample == True", "data_train, data_test = create_coco_json() return data_train, data_test def resume(self): #COCO \"\"\" Resume COCO", "vision model training \"\"\" def __init__(self): pass def get_env(self): #COCO \"\"\" Create environment", "= dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj = inv_map[class_id] self.data_dict = dict( id=self.id_unique, image_id=image_id, category_id=class_id, segmentation=seg,", "config['used_class_names'][class_name] generator.get_append_annotations() #annotate and append annotations if config['visualize']: #visualize generator.visualize() #store dataset image", "range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc = list(boxc) projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id))", "config['object_sampling_area'], num_objects_range = config['num_objects_range'], used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'],", "_segmentationToPoly(bitmap) 
self.too_small_obj = False try: #notify and skip the object with too small", "-1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int,", "def data_struct_image(self): #DOPE data_train, data_test = self.init_data() data = data_test if isTestSample ==", "gripper_uids[0], img_mask) #merge gripper links img_mask = np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge robot links", "is empty, which can happen for small numbers try: with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as", "categories def _segmentationToPoly(mask, ): \"\"\" Convert segmentation from RLE to polynoms ([[x1 y1", "[int(x.replace('.jpg','')) for x in os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")] files_train = [int(x.replace('.jpg','')) for x in", "env, add colors config['used_class_names'] = dict([x[1:3] for x in config['used_class_names_quantity']]) used_objects = []", "\"\"\" Make and append COCO annotations for each object in the scene \"\"\"", "file Returns: :return env: (object) Environment for dataset generation \"\"\" env = RandomizedEnvWrapper(env=gym.make(config['env_name'],", "from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. 
Parameters: :param mask: (array) Bitmap mask :return segmentationPoly: (list) Segmentation converted", "Exception(\"dataset_type in config: use one of 'coco', 'dope', 'vae'!\") #prepare directories config['output_test_folder'] =", "+ len(data_train['annotations']) print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) except FileNotFoundError: image_id", "= {\"exported_object_classes\": [], \"exported_objects\": []} self.env = None self.imsize = config[\"imsize\"] # only", "range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id) captured_image_size = {\"width\": config['camera_resolution'][0],", "the training config file \"\"\" with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\") as read_file: clr =", "(list) Categories in MSCOCO format \"\"\" categories = [] for value, key in", "env.get_camera_opencv_matrix_values(camera_id) captured_image_size = {\"width\": config['camera_resolution'][0], \"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( name=\"camera\" + str(camera_id), intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size,", "#points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0, 0.99), lineWidth = 4, lifeTime = 1) def write_json_end(self):", "= 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0, 0.99), lineWidth = 4, lifeTime = 1)", "= [2,2,2,2,2,2] self.env.robot.reset_random(action) # send the Kuka arms up observation, reward, done, info", "elif object_uid == gripper_uids[0]: class_name = env.robot.get_name() 
else: env_object_list = list(filter(lambda object: object.uid", "\"resume\"]: cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) if done: print(\"Episode finished after {} timesteps\".format(t+1))", "as p from bbox import BBox3D from myGym.envs.wrappers import RandomizedEnvWrapper import pkg_resources #", "pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with open(config_path) as file: config = commentjson.load(file) # initialize dataset generator", "= config['object_sampling_area'], num_objects_range = config['num_objects_range'], used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution =", "Returns: :param data: (dict) Corresponding data dictionary :param name: (string) Name of image", "config['robot'] elif object_uid == gripper_uids[0]: class_name = env.robot.get_name() else: env_object_list = list(filter(lambda object:", "camera_id)) for point in cuboid] projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB = [list(env.project_point_to_camera_image(point, camera_id))", "{} of {}.\".format(filename, name, episode, config['num_episodes'])) with open(os.path.join(path, filename), 'w') as f: json.dump(data,", "value: new_value.append(clr[item]) new_dict[key] = new_value config['color_dict'] = new_dict def _category_coco_format(): #COCO \"\"\" Create", "= used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset = True, ),", "show import cv2 import numpy as np import os import glob import json", "steps/frames we use for dataset for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras", "list(filter(lambda object: object.uid == object_uid, env_objects)) if len(env_object_list) > 0: env_object = env_object_list[0]", "elif config['dataset_type'] == 'vae': generator = GeneratorVae() else: raise Exception(\"dataset_type in config: use", 
"self.imsize)) cv2.imshow(\"image\", img) cv2.waitKey(1) padding = 6 - len(str(t+7999)) name = padding *", "isTestSample == True else data_train name = 'img_{}_cam{}.jpg'.format(image_id,camera_id) return data, name def store_image_info(self):", "as f: json.dump(self.camera, f, indent=4) filename = \"_object_settings\" + '.json' with open(os.path.join(config['output_test_folder'], filename),", "image info to corresponding data dict \"\"\" data['images'].append(dict( file_name=name, height=im.shape[0], width=im.shape[1], id=image_id,)) def", "config['color_dict'] = new_dict def _category_coco_format(): #COCO \"\"\" Create list of dictionaries with category", "= cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4,", "\"\"\" Create environment for VAE dataset generation according to dataset config file \"\"\"", "info = self.env.step(action) img = observation['camera_data'][6]['image'] imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img = cv2.resize(imgs[0:450,100:500],", "BBox3D from myGym.envs.wrappers import RandomizedEnvWrapper import pkg_resources # config, specify here or pass", "json data structure Returns: :return data_train: (dict) Data structure for training data :return", "x in os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")] files_train = [int(x.replace('.jpg','')) for x in os.listdir(config['output_train_folder']) if", "Generator class for COCO image dataset for YOLACT vision model training \"\"\" def", "random_pos=False) observation = env.get_observation() env_objects = observation[\"objects\"] for obj in env_objects: if obj.name", "1: config_path = CONFIG_DEFAULT print('No config.json passed in argument. 
Loading default config: {}'.format(CONFIG_DEFAULT))", "{}\".format(image_id, config['num_episodes'])) self.episode_zero() except FileNotFoundError: image_id = 0 return self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self): #DOPE", "= np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge robot links obj_ids = [x for x in", "otherwise 0 def init_data(self): #COCO \"\"\" Initialize data structures for COCO dataset annotations", "specified in the training config file \"\"\" with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\") as read_file:", "resume(self): #COCO \"\"\" Resume COCO dataset generation Returns: :return data_train: (dict) Training data", "env_objects: if obj.name not in self.objdim.keys(): self.objdim[obj.name] = obj.get_cuboid_dimensions() def init_data(self): #DOPE data_train", "0 return self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self): #DOPE data_train, data_test = self.init_data() data = data_test", ">0, otherwise 0 if config['make_dataset'] == \"new\": #cleanup files files = glob.glob(os.path.join(config['output_test_folder'],'./*')) for", "and len(seg[0])>0) except: #make inverse map id->name (just to pretty print) inv_map =", "-1) for projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [255,0,0], -1)", "object_uid in obj_ids: #loop through kuka and used objects in the image (in", "periods or at the end if episode % config['autosafe_episode'] == 0 or episode", "to be realistically seen print('Too small object of class {} with area={} in", "#COCO \"\"\" Create list of dictionaries with category id-name pairs in MSCOCO format", "{} for episodes: {}\".format(image_id, config['num_episodes'])) self.episode_zero() except FileNotFoundError: image_id = 0 return self.init_data()[0],self.init_data()[1],image_id", "{\"exported_object_classes\": [], 
\"exported_objects\": []} self.env = None self.imsize = config[\"imsize\"] # only supported", "action = env.action_space.sample() observation, reward, done, info = env.step(action) if t == config['num_steps']-1", "GeneratorCoco: #COCO \"\"\" Generator class for COCO image dataset for YOLACT vision model", "# helper functions: def color_names_to_rgb(): \"\"\" Assign RGB colors to objects by name", "% config['autosafe_episode'] == 0 or episode == config['num_episodes']-1: generator.write_json_end() data_train, data_test = generator.init_data()", "skip the object with too small visible representation assert(area > config['min_obj_area']) assert(len(seg)>0 and", "def write_json_end(self): #COCO \"\"\" Write json file with COCO annotations to output directory", "], categories = _category_coco_format(), ) return data_train, data_test def create_3D_box(env_object,objdim): #DOPE objpos =", "dataset generator if config['dataset_type'] == 'coco': generator = GeneratorCoco() elif config['dataset_type'] == 'dope':", "_category_coco_format(), ) data_test = dict( images=[# file_name, height, width, id ], type='instances', annotations=[#", "last image img_ids = [img[\"id\"] for img in data_train['images']] image_id = max(img_ids) +1", "data_train, data_test = generator.init_data() generator.episode_zero() elif config['make_dataset'] == 'resume': #resume print(\"Restoring dataset generation\")", "annotated object \"\"\" mask = img_mask==object_uid mask = np.expand_dims(mask, axis=2) mask = 255*mask.astype('uint8')", "n periods or at the end if episode % config['autosafe_episode'] == 0 or", "# config, specify here or pass as an input argument CONFIG_DEFAULT = pkg_resources.resource_filename(\"myGym\",", "area=area, bbox=bbox, iscrowd=0, ) if self.too_small_obj == False: #area ok data['annotations'].append(self.data_dict) #append annotations", "p.setGravity(0, 0, -9.81) def collect_data(self, steps): \"\"\" Collect data for VAE dataset Parameters:", "for training data :return 
data_test: (dist) Data structure for testing data \"\"\" data_train", "= data_test if isTestSample == True else data_train name = '{}.jpg'.format(image_id) return data,", "config['num_episodes'])) #env reset #random_robot randomizes the init position of robots #random_pos randomizes the", "#DOPE cuboid_with_centroid = env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg = segmentationToCocoMask(img_mask, object_uid) seg['counts'] = str(seg['counts'],", "segmentation format bitmap = mask.decode(seg) seg = _segmentationToPoly(bitmap) self.too_small_obj = False try: #notify", "in np.unique(img_mask)] #identify objects(links) in the camera view (in the image) img_mask =", "we use for dataset for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c]", "for YOLACT vision model training \"\"\" def __init__(self): pass def get_env(self): #COCO \"\"\"", "data_test if isTestSample == True else data_train name = 'img_{}_cam{}.jpg'.format(image_id,camera_id) return data, name", "p from bbox import BBox3D from myGym.envs.wrappers import RandomizedEnvWrapper import pkg_resources # config,", "file \"\"\" self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on =", "range(7): #p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78, 0.47), lineWidth = 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0,", "training \"\"\" def __init__(self): pass def get_env(self): #COCO \"\"\" Create environment for COCO", "= cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, [255,255,0], -1) print(class_name) cv2.imshow('image',image) 
cv2.waitKey(1000) self.draw_bounding_box_3D() def draw_bounding_box_3D(self):", "np.unique(img_mask)] #identify merged objects in the camera view (in the image) #prepare data", "steps env.render() #only render at the steps/frames we use for dataset for c", "COCO data structure :return image_id: (int) ID of last generated image in preceding", "observation, reward, done, info = self.env.step(action) img = observation['camera_data'][6]['image'] imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)", "config['num_episodes']): #loop through episodes print(\"episode: {}/{}\".format(episode, config['num_episodes'])) #env reset #random_robot randomizes the init", "= list(filter(lambda object: object.uid == object_uid, env_objects)) if len(env_object_list) > 0: env_object =", "from RLE to polynoms ([[x1 y1 x2 x2 y2 ...]]). Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830.", "env.robot.num_joints + 1)]) # check mode of writing files image_id = 0 #for", "dataset image info to corresponding data dict \"\"\" data['images'].append(dict( file_name=name, height=im.shape[0], width=im.shape[1], id=image_id,))", "def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} self.env = None self.imsize =", "\"projected_cuboid\": projected_cuboid, \"box3D\": box3D, \"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict) def visualize(self): #DOPE", "tuple(map(int, projected_cuboid_point)), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0], -1)", "robot_uids[0], img_mask) #merge robot links obj_ids = [x for x in np.unique(img_mask)] #identify", "dataset generation in COCO data structure :return data_test: (dist) Testing data from preceding", "COCO annotations to output directory \"\"\" if 
config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing annotations.json", "print(\"Image {}/{}\".format(t, steps)) self.env.close() # main if __name__ == \"__main__\": if len(sys.argv) <=", "bitmap = mask.decode(seg) seg = _segmentationToPoly(bitmap) self.too_small_obj = False try: #notify and skip", "[data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0], -1) image", "small object of class {} with area={} in img {}'.format(self.too_small_obj, self.data_dict['area'], name)) def", "= str(seg['counts'], \"utf-8\") #utf-8 format in str bbox = mask.toBbox(seg).flatten().tolist() bounding_box = {'top_left':", "[], \"exported_objects\": []} self.env = None self.imsize = config[\"imsize\"] # only supported format", "read_file: clr = json.load(read_file) #json file with suggested colors new_dict = {} for", "except: pass #happens when test JSON is empty, which can happen for small", "image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])),", "print(\"Storing {}.\".format(filename)) with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4) with open(os.path.join(config['output_train_folder'],", "with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f: data_train = json.load(f) # get the ID of", "config.json to output_folder with open(config['output_folder']+'/config_dataset.json', 'w') as f: commentjson.dump(config, f) if config['dataset_type'] 
==", "format \"\"\" categories = [] for value, key in config['used_class_names'].items(): categories.append({\"id\": int(key), \"name\":", "= 0 #for paralel dataset generation >0, otherwise 0 if config['make_dataset'] == \"new\":", "data structure Returns: :return data_train: (dict) Data structure for training data :return data_test:", "json_name = 'img_{}_cam{}.json'.format(image_id, camera_id) json_dict = {\"images\": data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()} if len(data[\"images\"])", "= config['robot'] elif object_uid == gripper_uids[0]: class_name = env.robot.get_name() else: env_object_list = list(filter(lambda", "#store dataset image and info generator.store_image_info() if config['make_dataset'] in [\"new\", \"resume\"]: cv2.imwrite(os.path.join(path, name),", "def init_data(self): #COCO \"\"\" Initialize data structures for COCO dataset annotations Returns: :return", "used_objects.append(x[1]) if config['color_dict'] == 'object_colors': color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\") #write config.json to output_folder with", "config['num_episodes_hard_reset'] == 0: #to prevent objects vanishing when GUI is on print(\"Hard reset!!!\")", "self.env.close() # main if __name__ == \"__main__\": if len(sys.argv) <= 1: config_path =", "f in files: os.remove(f) files = glob.glob(os.path.join(config['output_train_folder'],'./*')) for f in files: os.remove(f) data_train,", "import mask import pybullet as p from bbox import BBox3D from myGym.envs.wrappers import", "def get_env(self): #DOPE env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on", "Testing data from preceding dataset generation in COCO data structure :return image_id: (int)", "to dataset config file Returns: :return env: (object) Environment for dataset generation \"\"\"", 
"len(data_test['annotations']) + len(data_train['annotations']) print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) except FileNotFoundError:", "env.get_observation() env_objects = observation[\"objects\"] for obj in env_objects: if obj.name not in self.objdim.keys():", "for VAE dataset generation according to dataset config file \"\"\" self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'],", "{\"objects\":[]} return data_train, data_test def resume(self): #DOPE try: files_test = [int(x.replace('.jpg','')) for x", "break # write JSON annotations every n periods or at the end if", "env = generator.get_env() first_link_uid = env.robot.robot_uid robot_uids = np.array([((x + 1) << 24)", "obj_ids = [x for x in np.unique(img_mask)] #identify objects(links) in the camera view", "'/train' generator.collect_data(config['num_episodes']) print(\"Dataset finished. Ready to train!\") raise SystemExit(0) # initialize pybullet env", "#DOPE env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on = config['gui_on'],", "= 0 #image_id*x for paralel dataset generation, otherwise 0 def init_data(self): #COCO \"\"\"", "get_append_annotations(self): #DOPE cuboid_with_centroid = env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg = segmentationToCocoMask(img_mask, object_uid) seg['counts'] =", "kuka and used objects in the image (in the camera view) if object_uid", "data_struct_image(self): #COCO \"\"\" Assign name to COCO dataset image and train of test", "name to COCO dataset image and train of test status Returns: :param data:", "\"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\": bounding_box, \"cuboid\": cuboid, \"projected_cuboid\": projected_cuboid,", 
"config['output_train_folder'] data = data_train else: folder = config['output_test_folder'] data = data_test json_name =", "obj in env_objects: if obj.name not in self.objdim.keys(): self.objdim[obj.name] = obj.get_cuboid_dimensions() def init_data(self):", "Create list of dictionaries with category id-name pairs in MSCOCO format Returns: :return", "in the image (in the camera view) if object_uid == robot_uids[0]: class_name =", "0, -9.81) def collect_data(self, steps): \"\"\" Collect data for VAE dataset Parameters: :param", "pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') # helper functions: def color_names_to_rgb(): \"\"\" Assign RGB colors to objects", "#utf-8 format in str #2 or poly segmentation format bitmap = mask.decode(seg) seg", "str(seg['counts'], \"utf-8\") #utf-8 format in str bbox = mask.toBbox(seg).flatten().tolist() bounding_box = {'top_left': bbox[:2],", "from preceding dataset generation in COCO data structure :return image_id: (int) ID of", "len(contour) > 4: segmentationPoly.append(contour) return segmentationPoly def create_coco_json(): #COCO \"\"\" Create COCO json", "str(image_id) + '.json' if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing {} and {} at", "data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()} if len(data[\"images\"]) > 0: with open(os.path.join(folder,json_name), 'w') as f:", "try: files_test = [int(x.replace('.jpg','')) for x in os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")] files_train = [int(x.replace('.jpg',''))", "robot_uids = np.array([((x + 1) << 24) + first_link_uid for x in range(-1,", "category_id=class_id, segmentation=seg, area=area, bbox=bbox, iscrowd=0, ) if self.too_small_obj == False: #area ok data['annotations'].append(self.data_dict)", "paralel dataset generation, otherwise 0 def init_data(self): #COCO \"\"\" Initialize data structures for", ":return data_test: (dist) Testing data 
from preceding dataset generation in COCO data structure", "[\"new\", \"resume\"]: print(\"Storing {} and {} at episode {} of {}.\".format(filename, name, episode,", "range(env.robot.gripper_index, env.robot.num_joints + 1)]) # check mode of writing files image_id = 0", "small visible representation assert(area > config['min_obj_area']) assert(len(seg)>0 and len(seg[0])>0) except: #make inverse map", "def episode_zero(self): self.objdim = {} while len(self.objdim.keys()) < len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], random_pos=False) observation =", "import commentjson import sys import random from pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult from pycocotools", "from matplotlib.pyplot import imshow, show import cv2 import numpy as np import os", "Data structure for testing data \"\"\" data_train, data_test = create_coco_json() return data_train, data_test", "for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id = image_id +", "# initialize dataset generator if config['dataset_type'] == 'coco': generator = GeneratorCoco() elif config['dataset_type']", "_ in range(x[0]): used_objects.append(x[1]) if config['color_dict'] == 'object_colors': color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\") #write config.json", "for item in value: new_value.append(clr[item]) new_dict[key] = new_value config['color_dict'] = new_dict def _category_coco_format():", "{}.\".format(episode, config['num_episodes'])) for flag in ['test','train']: if flag == 'train': folder = config['output_train_folder']", "= observation['camera_data'][6]['image'] imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img = cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize)) cv2.imshow(\"image\", img)", "True else config['output_train_folder'] 
#get dataset image and its mask im = observation[\"camera_data\"][camera_id][\"image\"] im", "generator.episode_zero() elif config['make_dataset'] == 'resume': #resume print(\"Restoring dataset generation\") data_test, data_train, image_id =", "config['num_steps']-1 or t%config['make_shot_every_frame'] == 0: # we only use frames from some steps", "camera_id)) for point in box3D] if class_name not in self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({ \"class\":", "the end if episode % config['autosafe_episode'] == 0 or episode == config['num_episodes']-1: generator.write_json_end()", "contours, _ = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly = [] for contour in contours:", "box3D.append(tuple(boxp[x])) boxc = list(boxc) projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id)) for", "config['output_test_folder'] data = data_test json_name = 'img_{}_cam{}.json'.format(image_id, camera_id) json_dict = {\"images\": data[\"images\"], \"type\":'instances',\"annotations\":", "import numpy as np import os import glob import json import commentjson import", "self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({ \"class\": class_name, \"segmentation_class_id\": class_id, \"cuboid_dimensions\": self.objdim[class_name] }) self.data_dict = { \"class\":", "= create_coco_json() return data_train, data_test def resume(self): #COCO \"\"\" Resume COCO dataset generation", "[]} self.env = None self.imsize = config[\"imsize\"] # only supported format at the", "{} at episode {} of {}.\".format(filename, name, episode, config['num_episodes'])) with open(os.path.join(path, filename), 'w')", "according to dataset config file \"\"\" self.env 
= RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on", "contour = contour.flatten().tolist() if len(contour) > 4: segmentationPoly.append(contour) return segmentationPoly def create_coco_json(): #COCO", "else: cv2.imshow('Labeled object', mask) cv2.waitKey(1000) print(self.data_dict['bbox']) def write_json_end(self): #COCO \"\"\" Write json file", "for paralel dataset generation, otherwise 0 def init_data(self): #COCO \"\"\" Initialize data structures", "filename), 'w') as f: json.dump(self.object_settings, f, indent=4) class GeneratorVae: \"\"\" Generator class for", "data_test def resume(self): #COCO \"\"\" Resume COCO dataset generation Returns: :return data_train: (dict)", "to objects by name as specified in the training config file \"\"\" with", "the image) img_mask = np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask) #merge gripper links img_mask = np.where(np.isin(img_mask,robot_uids),", "json file with COCO annotations to output directory \"\"\" if config['make_dataset'] in [\"new\",", "#action = [2,2,2,2,2,2] self.env.robot.reset_random(action) # send the Kuka arms up observation, reward, done,", "the movements of robots (using joint control) action = env.action_space.sample() observation, reward, done,", "new_dict[key] = new_value config['color_dict'] = new_dict def _category_coco_format(): #COCO \"\"\" Create list of", "files image_id = 0 #for paralel dataset generation >0, otherwise 0 if config['make_dataset']", "continue if class_name in config['used_class_names']: class_id = config['used_class_names'][class_name] generator.get_append_annotations() #annotate and append annotations", "image dataset for YOLACT vision model training \"\"\" def __init__(self): pass def get_env(self):", "= env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg = segmentationToCocoMask(img_mask, object_uid) seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8", "clear 
data and continue data[\"images\"].clear() data[\"annotations\"].clear() class GeneratorDope: #DOPE def __init__(self): self.object_settings =", "= {} while len(self.objdim.keys()) < len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], random_pos=False) observation = env.get_observation() env_objects =", "#resume print(\"Restoring dataset generation\") data_test, data_train, image_id = generator.resume() elif (config['make_dataset'] == \"display\"):", "config['num_objects_range'], used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], dataset = True,", "paralel dataset generation >0, otherwise 0 if config['make_dataset'] == \"new\": #cleanup files files", "= '{}.jpg'.format(image_id) return data, name def store_image_info(self): #DOPE #write dataset image info filename", "for x in range(6)] #action = [2,2,2,2,2,2] self.env.robot.reset_random(action) # send the Kuka arms", "\"\"\" env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on = config['gui_on'],", "config['make_dataset'] in [\"new\", \"resume\"]: cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) if done: print(\"Episode finished", "object', mask) cv2.waitKey(1000) print(self.data_dict['bbox']) def write_json_end(self): #COCO \"\"\" Write json file with COCO", "0: # generator.episode_zero() if episode % config['num_episodes_hard_reset'] == 0: #to prevent objects vanishing", "image and info generator.store_image_info() if config['make_dataset'] in [\"new\", \"resume\"]: cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY),", "with area={} in img {}'.format(self.too_small_obj, self.data_dict['area'], name)) def visualize(self): #COCO \"\"\" Visualize mask", "encoding RLE segmentation format seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str #2", "data_train name = 
'{}.jpg'.format(image_id) return data, name def store_image_info(self): #DOPE #write dataset image", "projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id)) for point in cuboid] projected_3DBB_centroid = list(env.project_point_to_camera_image(boxc, camera_id)) projected_3DBB", "with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w')", ":return segmentationPoly: (list) Segmentation converted to polynoms \"\"\" contours, _ = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE,", "too small visible representation assert(area > config['min_obj_area']) assert(len(seg)>0 and len(seg[0])>0) except: #make inverse", "VAE dataset generation according to dataset config file \"\"\" self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot", "for episodes: {}\".format(image_id, config['num_episodes'])) self.episode_zero() except FileNotFoundError: image_id = 0 return self.init_data()[0],self.init_data()[1],image_id def", "intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id) captured_image_size = {\"width\": config['camera_resolution'][0], \"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( name=\"camera\" + str(camera_id),", "= env.get_observation() env_objects = observation[\"objects\"] for obj in env_objects: if obj.name not in", "init_data(self): #DOPE data_train = {\"objects\":[]} data_test = {\"objects\":[]} return data_train, data_test def resume(self):", "= {\"images\": data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()} if len(data[\"images\"]) > 0: with open(os.path.join(folder,json_name), 'w')", "segmentation=seg, area=area, bbox=bbox, iscrowd=0, ) if self.too_small_obj == False: #area ok data['annotations'].append(self.data_dict) #append", "if config['dataset_type'] == 'vae': 
generator.get_env() dataset_pth = config['output_folder'] + '/train' generator.collect_data(config['num_episodes']) print(\"Dataset finished.", "p.setGravity(0, 0, -9.81) return env def episode_zero(self): self.objdim = {} while len(self.objdim.keys()) <", "files_test = [int(x.replace('.jpg','')) for x in os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")] files_train = [int(x.replace('.jpg','')) for", "Returns: :return data_train: (dict) Training data from preceding dataset generation in COCO data", "pkg_resources # config, specify here or pass as an input argument CONFIG_DEFAULT =", "* \"0\" + str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img) data[t] = img print(\"Image {}/{}\".format(t, steps))", "RLE to polynoms ([[x1 y1 x2 x2 y2 ...]]). Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. Parameters:", "+ str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img) data[t] = img print(\"Image {}/{}\".format(t, steps)) self.env.close() #", "# bool, test/train data? path = config['output_test_folder'] if isTestSample == True else config['output_train_folder']", "draw_bounding_box_3D(self): #DOPE for points in range(7): #p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78, 0.47), lineWidth =", "dataset generation in COCO data structure :return image_id: (int) ID of last generated", "0: #to prevent objects vanishing when GUI is on print(\"Hard reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'],", "to generate train/test sets from the simulator in COCO or DOPE format. Used", "mask.toBbox(seg).flatten().tolist() #1 run length encoding RLE segmentation format seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8", "> 0: env_object = env_object_list[0] class_name = env_object.get_name() else: continue if class_name in", "sets from the simulator in COCO or DOPE format. 
Used for vision training.", "data_test = json.load(f) except: pass #happens when test JSON is empty, which can", "for img in data_train['images']] image_id = max(img_ids) +1 # +1 if last sample", "image_id = max(max(files_test),max(files_train)) print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) self.episode_zero() except", "__init__(self): pass def get_env(self): #COCO \"\"\" Create environment for COCO dataset generation according", "= pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with open(config_path) as file: config = commentjson.load(file) # initialize dataset", "data from preceding dataset generation in COCO data structure :return image_id: (int) ID", "json.dump(self.object_settings, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4) class", "bbox import BBox3D from myGym.envs.wrappers import RandomizedEnvWrapper import pkg_resources # config, specify here", "test JSON is empty, which can happen for small numbers try: with open(os.path.join(config['output_train_folder'],'annotations.json'),", "object with too small visible representation assert(area > config['min_obj_area']) assert(len(seg)>0 and len(seg[0])>0) except:", "def get_env(self): \"\"\" Create environment for VAE dataset generation according to dataset config", "data = data_test if isTestSample == True else data_train name = '{}.jpg'.format(image_id) return", "self.objdim.keys(): self.objdim[obj.name] = obj.get_cuboid_dimensions() def init_data(self): #DOPE data_train = {\"objects\":[]} data_test = {\"objects\":[]}", "from myGym.envs.wrappers import RandomizedEnvWrapper import pkg_resources # config, specify here or pass as", "done, info = self.env.step(action) img = observation['camera_data'][6]['image'] imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img =", "open(config_path) as file: config = commentjson.load(file) # initialize dataset generator if 
config['dataset_type'] ==", "print(self.data_dict['bbox']) def write_json_end(self): #COCO \"\"\" Write json file with COCO annotations to output", "in COCO or DOPE format. Used for vision training. import gym from myGym", "= [] for item in value: new_value.append(clr[item]) new_dict[key] = new_value config['color_dict'] = new_dict", "bbox=bbox, iscrowd=0, ) if self.too_small_obj == False: #area ok data['annotations'].append(self.data_dict) #append annotations self.id_unique", "COCO dataset image and train of test status Returns: :param data: (dict) Corresponding", "if episode % config['autosafe_episode'] == 0 or episode == config['num_episodes']-1: generator.write_json_end() data_train, data_test", "with open(config_path) as file: config = commentjson.load(file) # initialize dataset generator if config['dataset_type']", "{}/{}\".format(episode, config['num_episodes'])) #env reset #random_robot randomizes the init position of robots #random_pos randomizes", "data_train, data_test def resume(self): #COCO \"\"\" Resume COCO dataset generation Returns: :return data_train:", "+ '/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True) os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define objects to appear in the env,", "image_id = image_id + 1 #unique isTestSample = np.random.random_sample() < config['train_test_split_pct'] # bool,", "categories: (list) Categories in MSCOCO format \"\"\" categories = [] for value, key", "dataset generation according to dataset config file \"\"\" self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot =", "flag in ['test','train']: if flag == 'train': folder = config['output_train_folder'] data = data_train", "= RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on = True, gui_on = config['gui_on'], show_bounding_boxes_gui =", "generator.data_struct_image() for object_uid in obj_ids: #loop through kuka and used objects in the", "camera_id)) projected_3DBB = 
[list(env.project_point_to_camera_image(point, camera_id)) for point in box3D] if class_name not in", "= mask.decode(seg) seg = _segmentationToPoly(bitmap) self.too_small_obj = False try: #notify and skip the", "image img_ids = [img[\"id\"] for img in data_train['images']] image_id = max(img_ids) +1 #", "name=\"camera\" + str(camera_id), intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size, )) if config['make_dataset'] in [\"new\", \"resume\"]: filename =", "in range(x[0]): used_objects.append(x[1]) if config['color_dict'] == 'object_colors': color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\") #write config.json to", "myGym.envs.wrappers import RandomizedEnvWrapper import pkg_resources # config, specify here or pass as an", "= image_id + 1 #unique isTestSample = np.random.random_sample() < config['train_test_split_pct'] # bool, test/train", "seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str bbox = mask.toBbox(seg).flatten().tolist() bounding_box =", "data :return data_test: (dist) Data structure for testing data \"\"\" data_train = dict(", "self.too_small_obj = False try: #notify and skip the object with too small visible", "True else data_train name = '{}.jpg'.format(image_id) return data, name def store_image_info(self): #DOPE #write", "= config['used_class_names'][class_name] generator.get_append_annotations() #annotate and append annotations if config['visualize']: #visualize generator.visualize() #store dataset", "generation according to dataset config file \"\"\" self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'],", "id-name pairs in MSCOCO format Returns: :return categories: (list) Categories in MSCOCO format", "img in data_train['images']] image_id = max(img_ids) +1 # +1 if last sample were", "[255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, 
[data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0], -1) image = cv2.circle(cv2.UMat(image),", "training \"\"\" def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} self.env = None", "'/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True) os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define objects to appear in the env, add", "def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} def get_env(self): #DOPE env =", "generator.collect_data(config['num_episodes']) print(\"Dataset finished. Ready to train!\") raise SystemExit(0) # initialize pybullet env env", "\"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( name=\"camera\" + str(camera_id), intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size, )) if config['make_dataset'] in [\"new\",", "object_uid == robot_uids[0]: class_name = config['robot'] elif object_uid == gripper_uids[0]: class_name = env.robot.get_name()", "projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict) def visualize(self): #DOPE image = im for projected_cuboid_point", "structure for testing data \"\"\" data_train = dict( images=[# file_name, height, width, id", "(object) Environment for dataset generation \"\"\" env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on", "+=1 else: #area too small to be realistically seen print('Too small object of", "or DOPE format. Used for vision training. import gym from myGym import envs", "small numbers try: with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f: data_train = json.load(f) # get", "config.json passed in argument. 
Loading default config: {}'.format(CONFIG_DEFAULT)) else: config_path = pkg_resources.resource_filename(\"myGym\", sys.argv[1])", "bbox = mask.toBbox(seg).flatten().tolist() #1 run length encoding RLE segmentation format seg['counts'] = str(seg['counts'],", "colors config['used_class_names'] = dict([x[1:3] for x in config['used_class_names_quantity']]) used_objects = [] for x", "padding = 6 - len(str(t+7999)) name = padding * \"0\" + str(t+7999) cv2.imwrite(os.path.join(dataset_pth,", "captured_image_size=captured_image_size, )) if config['make_dataset'] in [\"new\", \"resume\"]: filename = \"_camera_settings\" + '.json' print(\"Storing", "as file: config = commentjson.load(file) # initialize dataset generator if config['dataset_type'] == 'coco':", "(list) Segmentation converted to polynoms \"\"\" contours, _ = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly", "image_id = generator.resume() elif (config['make_dataset'] == \"display\"): data_train, data_test = generator.init_data() generator.episode_zero() #", "self.env = None self.imsize = config[\"imsize\"] # only supported format at the moment", "np.array([((x + 1) << 24) + first_link_uid for x in range(env.robot.gripper_index, env.robot.num_joints +", "done: print(\"Episode finished after {} timesteps\".format(t+1)) break # write JSON annotations every n", "img_ids = [img[\"id\"] for img in data_train['images']] image_id = max(img_ids) +1 # +1", "new_value = [] for item in value: new_value.append(clr[item]) new_dict[key] = new_value config['color_dict'] =", "open(os.path.join(path, filename), 'w') as f: json.dump(data, f, indent=4) def get_append_annotations(self): #DOPE cuboid_with_centroid =", "segmentation, area, iscrowd, image_id, bbox, category_id, id ], categories = _category_coco_format(), ) data_test", "run length encoding RLE segmentation format seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in", "image and train of test status Returns: 
:param data: (dict) Corresponding data dictionary", "= np.random.random_sample() < config['train_test_split_pct'] # bool, test/train data? path = config['output_test_folder'] if isTestSample", "len(data_train['annotations']) print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) except FileNotFoundError: image_id =", "we only use frames from some steps env.render() #only render at the steps/frames", "image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) except FileNotFoundError: image_id = 0 return data_test,", "active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id = image_id + 1 #unique isTestSample = np.random.random_sample() <", "padding * \"0\" + str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img) data[t] = img print(\"Image {}/{}\".format(t,", "env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center class GeneratorCoco: #COCO \"\"\" Generator class for COCO", "f: commentjson.dump(config, f) if config['dataset_type'] == 'vae': generator.get_env() dataset_pth = config['output_folder'] + '/train'", "x in np.unique(img_mask)] #identify objects(links) in the camera view (in the image) img_mask", "self.id_unique = 0 #image_id*x for paralel dataset generation, otherwise 0 def init_data(self): #COCO", "the ID of last image img_ids = [img[\"id\"] for img in data_train['images']] image_id", "generator.init_data() generator.episode_zero() # the main loop for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through", "category id-name pairs in MSCOCO format Returns: :return categories: (list) Categories in MSCOCO", "self.objdim[obj.name] = obj.get_cuboid_dimensions() def init_data(self): #DOPE data_train = {\"objects\":[]} data_test = {\"objects\":[]} 
return", "in files: os.remove(f) data_train, data_test = generator.init_data() generator.episode_zero() elif config['make_dataset'] == 'resume': #resume", "= _category_coco_format(), ) data_test = dict( images=[# file_name, height, width, id ], type='instances',", "\"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\": bounding_box, \"cuboid\": cuboid, \"projected_cuboid\": projected_cuboid, \"box3D\": box3D, \"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\":", "self.objdim[class_name] }) self.data_dict = { \"class\": class_name, \"class_id\": class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\":", "self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({ \"class\": class_name, \"segmentation_class_id\": class_id, \"cuboid_dimensions\": self.objdim[class_name] }) self.data_dict = {", "for f in files: os.remove(f) data_train, data_test = generator.init_data() generator.episode_zero() elif config['make_dataset'] ==", "annotations self.id_unique +=1 else: #area too small to be realistically seen print('Too small", "for vision training. 
import gym from myGym import envs from matplotlib.pyplot import imshow,", "\"\"\" Initialize data structures for COCO dataset annotations Returns: :return data_train: (dict) Data", "\"_object_settings\" + '.json' with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4) with", "import cv2 import numpy as np import os import glob import json import", "'dope': generator = GeneratorDope() elif config['dataset_type'] == 'vae': generator = GeneratorVae() else: raise", "{} and {} at episode {} of {}.\".format(filename, name, episode, config['num_episodes'])) with open(os.path.join(path,", "check mode of writing files image_id = 0 #for paralel dataset generation >0,", "open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as", "Bitmap mask :return segmentationPoly: (list) Segmentation converted to polynoms \"\"\" contours, _ =", "\"\"\" data_train, data_test = create_coco_json() return data_train, data_test def resume(self): #COCO \"\"\" Resume", "not in here). 
it's safe to have holes in the ids, just need", "mask import pybullet as p from bbox import BBox3D from myGym.envs.wrappers import RandomizedEnvWrapper", "id ], categories = _category_coco_format(), ) return data_train, data_test def create_3D_box(env_object,objdim): #DOPE objpos", "folder = config['output_train_folder'] data = data_train else: folder = config['output_test_folder'] data = data_test", "for x in config['used_class_names_quantity']: for _ in range(x[0]): used_objects.append(x[1]) if config['color_dict'] == 'object_colors':", ":param name: (string) Name of image file for saving \"\"\" data = data_test", "config['min_obj_area']) assert(len(seg)>0 and len(seg[0])>0) except: #make inverse map id->name (just to pretty print)", "def __init__(self): pass def get_env(self): #COCO \"\"\" Create environment for COCO dataset generation", "f) if config['dataset_type'] == 'vae': generator.get_env() dataset_pth = config['output_folder'] + '/train' generator.collect_data(config['num_episodes']) print(\"Dataset", "= [] for x in config['used_class_names_quantity']: for _ in range(x[0]): used_objects.append(x[1]) if config['color_dict']", "else: continue if class_name in config['used_class_names']: class_id = config['used_class_names'][class_name] generator.get_append_annotations() #annotate and append", "\"\"\" Assign RGB colors to objects by name as specified in the training", "in config['object_colors'].items(): new_value = [] for item in value: new_value.append(clr[item]) new_dict[key] = new_value", "(array) Bitmap mask :return segmentationPoly: (list) Segmentation converted to polynoms \"\"\" contours, _", "config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) def collect_data(self, steps): \"\"\" Collect data for VAE dataset", "# we only use frames from some steps env.render() #only render at the", "[list(env.project_point_to_camera_image(point, camera_id)) for point in box3D] if class_name not in 
self.object_settings[\"exported_object_classes\"]: self.object_settings[\"exported_object_classes\"].append(class_name) self.object_settings['exported_objects'].append({", "episode_zero(self): \"\"\" Initial espisode set-up \"\"\" self.id_unique = 0 #image_id*x for paralel dataset", "\"\"\" Collect data for VAE dataset Parameters: :param steps: (int) Number of episodes", "= data_train else: folder = config['output_test_folder'] data = data_test json_name = 'img_{}_cam{}.json'.format(image_id, camera_id)", "if config['make_dataset'] == \"new\": #cleanup files files = glob.glob(os.path.join(config['output_test_folder'],'./*')) for f in files:", "type='instances', annotations=[# segmentation, area, iscrowd, image_id, bbox, category_id, id ], categories = _category_coco_format(),", "first_link_uid = env.robot.robot_uid robot_uids = np.array([((x + 1) << 24) + first_link_uid for", "create_coco_json() return data_train, data_test def resume(self): #COCO \"\"\" Resume COCO dataset generation Returns:", "x in range(env.robot.gripper_index, env.robot.num_joints + 1)]) # check mode of writing files image_id", "(using joint control) action = env.action_space.sample() observation, reward, done, info = env.step(action) if", "= {\"exported_object_classes\": [], \"exported_objects\": []} def get_env(self): #DOPE env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot =", "SystemExit(0) # initialize pybullet env env = generator.get_env() first_link_uid = env.robot.robot_uid robot_uids =", "write_json_end(self): #COCO \"\"\" Write json file with COCO annotations to output directory \"\"\"", "if config['visualize']: #visualize generator.visualize() #store dataset image and info generator.store_image_info() if config['make_dataset'] in", "randomize the movements of robots (using joint control) action = env.action_space.sample() observation, reward,", "= np.array([((x + 1) << 24) + first_link_uid for x in range(env.robot.gripper_index, env.robot.num_joints", 
"episode_zero(self): self.objdim = {} while len(self.objdim.keys()) < len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], random_pos=False) observation = env.get_observation()", "in env_objects: if obj.name not in self.objdim.keys(): self.objdim[obj.name] = obj.get_cuboid_dimensions() def init_data(self): #DOPE", "GeneratorCoco() elif config['dataset_type'] == 'dope': generator = GeneratorDope() elif config['dataset_type'] == 'vae': generator", "0, -9.81) return env def episode_zero(self): self.objdim = {} while len(self.objdim.keys()) < len(config['used_class_names'].keys()):", "[2,2,2,2,2,2] self.env.robot.reset_random(action) # send the Kuka arms up observation, reward, done, info =", "output_folder with open(config['output_folder']+'/config_dataset.json', 'w') as f: commentjson.dump(config, f) if config['dataset_type'] == 'vae': generator.get_env()", "#COCO \"\"\" Visualize mask and bounding box coordinates for COCO annotated object \"\"\"", "contours: contour = contour.flatten().tolist() if len(contour) > 4: segmentationPoly.append(contour) return segmentationPoly def create_coco_json():", "= list(boxc) projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid = [list(env.project_point_to_camera_image(point, camera_id)) for point in", "f: json.dump(self.camera, f, indent=4) filename = \"_object_settings\" + '.json' with open(os.path.join(config['output_test_folder'], filename), 'w')", "active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings = env.get_camera_opencv_matrix_values(camera_id) captured_image_size = {\"width\": config['camera_resolution'][0], \"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict(", "], categories = _category_coco_format(), ) data_test = dict( images=[# file_name, height, width, id", "episode % config['autosafe_episode'] == 0 or episode == config['num_episodes']-1: 
generator.write_json_end() data_train, data_test =", "'w') as f: commentjson.dump(config, f) if config['dataset_type'] == 'vae': generator.get_env() dataset_pth = config['output_folder']", "filename), 'w') as f: json.dump(self.object_settings, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f:", "of last image img_ids = [img[\"id\"] for img in data_train['images']] image_id = max(img_ids)", "in here). it's safe to have holes in the ids, just need to", "contour in contours: contour = contour.flatten().tolist() if len(contour) > 4: segmentationPoly.append(contour) return segmentationPoly", "'configs/dataset_coco.json') # helper functions: def color_names_to_rgb(): \"\"\" Assign RGB colors to objects by", "env env = generator.get_env() first_link_uid = env.robot.robot_uid robot_uids = np.array([((x + 1) <<", "cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly = [] for contour in contours: contour = contour.flatten().tolist() if", ":return categories: (list) Categories in MSCOCO format \"\"\" categories = [] for value,", "{} of {}.\".format(episode, config['num_episodes'])) for flag in ['test','train']: if flag == 'train': folder", "= contour.flatten().tolist() if len(contour) > 4: segmentationPoly.append(contour) return segmentationPoly def create_coco_json(): #COCO \"\"\"", "f: data_train = json.load(f) # get the ID of last image img_ids =", "def visualize(self): #COCO \"\"\" Visualize mask and bounding box coordinates for COCO annotated", "0.78, 0.47), lineWidth = 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0, 0.99), lineWidth = 4,", "range(steps): self.env.reset(random_pos=True) self.env.render() action = [random.uniform(1,2) for x in range(6)] #action = [2,2,2,2,2,2]", "range(6)] #action = [2,2,2,2,2,2] self.env.robot.reset_random(action) # send the Kuka arms up observation, 
reward,", "[] for item in value: new_value.append(clr[item]) new_dict[key] = new_value config['color_dict'] = new_dict def", "with category id-name pairs in MSCOCO format Returns: :return categories: (list) Categories in", "categories = [] for value, key in config['used_class_names'].items(): categories.append({\"id\": int(key), \"name\": str(value)}) return", "pass def get_env(self): #COCO \"\"\" Create environment for COCO dataset generation according to", "= 0 return data_test, data_train, image_id def data_struct_image(self): #COCO \"\"\" Assign name to", "y2 ...]]). Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. Parameters: :param mask: (array) Bitmap mask :return segmentationPoly:", "seg = segmentationToCocoMask(img_mask, object_uid) seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str bbox", "initialize pybullet env env = generator.get_env() first_link_uid = env.robot.robot_uid robot_uids = np.array([((x +", "file \"\"\" with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\") as read_file: clr = json.load(read_file) #json file", "\"\"\" def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} self.env = None self.imsize", "from bbox import BBox3D from myGym.envs.wrappers import RandomizedEnvWrapper import pkg_resources # config, specify", "y1 x2 x2 y2 ...]]). Code from https://github.com/facebookresearch/Detectron/issues/100#issuecomment-362882830. 
Parameters: :param mask: (array) Bitmap", "== 'dope': generator = GeneratorDope() elif config['dataset_type'] == 'vae': generator = GeneratorVae() else:", "cuboid=cuboid_with_centroid[:8] seg = segmentationToCocoMask(img_mask, object_uid) seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str", "= 6 - len(str(t+7999)) name = padding * \"0\" + str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)),", "#write dataset image info filename = str(image_id) + '.json' if config['make_dataset'] in [\"new\",", "its mask im = observation[\"camera_data\"][camera_id][\"image\"] im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR) #fix colors img_mask =", "= mask.toBbox(seg).flatten().tolist() #1 run length encoding RLE segmentation format seg['counts'] = str(seg['counts'], \"utf-8\")", "in contours: contour = contour.flatten().tolist() if len(contour) > 4: segmentationPoly.append(contour) return segmentationPoly def", "= data_test json_name = 'img_{}_cam{}.json'.format(image_id, camera_id) json_dict = {\"images\": data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()}", "return env def episode_zero(self): self.objdim = {} while len(self.objdim.keys()) < len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], random_pos=False)", "used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], dataset = True, ),", "key in config['used_class_names'].items(): categories.append({\"id\": int(key), \"name\": str(value)}) return categories def _segmentationToPoly(mask, ): \"\"\"", "[data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, 
[255,255,0], -1) print(class_name)", "COCO or DOPE format. Used for vision training. import gym from myGym import", "# check mode of writing files image_id = 0 #for paralel dataset generation", "COCO dataset generation according to dataset config file Returns: :return env: (object) Environment", "\"\"\" mask = img_mask==object_uid mask = np.expand_dims(mask, axis=2) mask = 255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1)", "(dict) Data structure for training data :return data_test: (dist) Data structure for testing", "\"\"\" Resume COCO dataset generation Returns: :return data_train: (dict) Training data from preceding", "data_test = {\"objects\":[]} return data_train, data_test def resume(self): #DOPE try: files_test = [int(x.replace('.jpg',''))", "for x in os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")] image_id = max(max(files_test),max(files_train)) print(\"Resuming from image_id {}", "camera view (in the image) #prepare data strucuture data, name = generator.data_struct_image() for", "mode of writing files image_id = 0 #for paralel dataset generation >0, otherwise", "str(value)}) return categories def _segmentationToPoly(mask, ): \"\"\" Convert segmentation from RLE to polynoms", "default config: {}'.format(CONFIG_DEFAULT)) else: config_path = pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with open(config_path) as file: config", "self.camera = {\"camera_settings\": []} for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c]", "= np.expand_dims(mask, axis=2) mask = 255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1) print(class_name) if self.too_small_obj: cv2.imshow('Too small", "data for VAE dataset Parameters: :param steps: (int) Number of episodes initiated during", "float(mask.area(seg)) bbox = mask.toBbox(seg).flatten().tolist() #1 run length encoding RLE segmentation format seg['counts'] =", "4, lifeTime = 1) def 
write_json_end(self): #DOPE self.camera = {\"camera_settings\": []} for c", "return data, name def store_image_info(self): #COCO \"\"\" Append COCO dataset image info to", "directories config['output_test_folder'] = config['output_folder'] + '/test' config['output_train_folder'] = config['output_folder'] + '/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True)", "else: config_path = pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with open(config_path) as file: config = commentjson.load(file) #", ":return image_id: (int) ID of last generated image in preceding dataset generation \"\"\"", "RandomizedEnvWrapper import pkg_resources # config, specify here or pass as an input argument", "inv_map[class_id] self.data_dict = dict( id=self.id_unique, image_id=image_id, category_id=class_id, segmentation=seg, area=area, bbox=bbox, iscrowd=0, ) if", "for x in np.unique(img_mask)] #identify objects(links) in the camera view (in the image)", "as f: data_train = json.load(f) # get the ID of last image img_ids", "random from pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult from pycocotools import mask import pybullet as", "data? 
path = config['output_test_folder'] if isTestSample == True else config['output_train_folder'] #get dataset image", "config[\"imsize\"] # only supported format at the moment def get_env(self): \"\"\" Create environment", "1) def write_json_end(self): #DOPE self.camera = {\"camera_settings\": []} for c in range(np.count_nonzero(config['active_cameras'])): #loop", "\"display\"): data_train, data_test = generator.init_data() generator.episode_zero() # the main loop for episode in", "bounding box coordinates for COCO annotated object \"\"\" mask = img_mask==object_uid mask =", "= np.zeros((steps, self.imsize, self.imsize, 3), dtype='f') for t in range(steps): self.env.reset(random_pos=True) self.env.render() action", "[\"new\", \"resume\"]: cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) if done: print(\"Episode finished after {}", "cuboid_with_centroid = env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg = segmentationToCocoMask(img_mask, object_uid) seg['counts'] = str(seg['counts'], \"utf-8\")", "JSON is empty, which can happen for small numbers try: with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r')", "#area too small to be realistically seen print('Too small object of class {}", "image info filename = str(image_id) + '.json' if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing", "Name of image file for saving \"\"\" data = data_test if isTestSample ==", "else config['output_train_folder'] #get dataset image and its mask im = observation[\"camera_data\"][camera_id][\"image\"] im =", "#p.addUserDebugLine([0,0,0], [1,2,3], lineColorRGB=(0.31, 0.78, 0.47), lineWidth = 10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0, 0.99),", "data structures for COCO dataset annotations Returns: :return data_train: 
(dict) Data structure for", "config = commentjson.load(file) # initialize dataset generator if config['dataset_type'] == 'coco': generator =", "True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self): self.objdim", "= [img[\"id\"] for img in data_train['images']] image_id = max(img_ids) +1 # +1 if", "saving \"\"\" data = data_test if isTestSample == True else data_train name =", "= generator.data_struct_image() for object_uid in obj_ids: #loop through kuka and used objects in", "robots #random_pos randomizes the init positions of objects # if episode == 0:", "annotations=[# segmentation, area, iscrowd, image_id, bbox, category_id, id ], categories = _category_coco_format(), )", "active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset = True, ), config_path =", "episodes: {}\".format(image_id, config['num_episodes'])) self.episode_zero() except FileNotFoundError: image_id = 0 return self.init_data()[0],self.init_data()[1],image_id def data_struct_image(self):", "images=[# file_name, height, width, id ], type='instances', annotations=[# segmentation, area, iscrowd, image_id, bbox,", "\"\"\" if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing annotations.json at episode {} of {}.\".format(episode,", "data structure :return image_id: (int) ID of last generated image in preceding dataset", "too small to be realistically seen print('Too small object of class {} with", "print(\"Storing {} and {} at episode {} of {}.\".format(filename, name, episode, config['num_episodes'])) with", "cv2.imshow('Labeled object', mask) cv2.waitKey(1000) print(self.data_dict['bbox']) def write_json_end(self): #COCO \"\"\" Write json file with", "Append COCO dataset image info to corresponding data dict \"\"\" data['images'].append(dict( file_name=name, height=im.shape[0],", "COCO dataset generation Returns: :return 
data_train: (dict) Training data from preceding dataset generation", "class_name = env_object.get_name() else: continue if class_name in config['used_class_names']: class_id = config['used_class_names'][class_name] generator.get_append_annotations()", "generator.visualize() #store dataset image and info generator.store_image_info() if config['make_dataset'] in [\"new\", \"resume\"]: cv2.imwrite(os.path.join(path,", "in os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")] image_id = max(max(files_test),max(files_train)) print(\"Resuming from image_id {} for episodes:", "== \"new\": #cleanup files files = glob.glob(os.path.join(config['output_test_folder'],'./*')) for f in files: os.remove(f) files", "sample were test (thus not in here). it's safe to have holes in", "monothically increase self.id_unique = len(data_test['annotations']) + len(data_train['annotations']) print(\"Resuming from image_id {} for episodes:", "else: #area too small to be realistically seen print('Too small object of class", "== False: #area ok data['annotations'].append(self.data_dict) #append annotations self.id_unique +=1 else: #area too small", "training config file \"\"\" with open(pkg_resources.resource_filename(\"myGym\", 'configs/rgbcolors.json'), \"r\") as read_file: clr = json.load(read_file)", "generator.get_env() dataset_pth = config['output_folder'] + '/train' generator.collect_data(config['num_episodes']) print(\"Dataset finished. 
Ready to train!\") raise", "\"categories\":_category_coco_format()} if len(data[\"images\"]) > 0: with open(os.path.join(folder,json_name), 'w') as f: json.dump(json_dict, f, indent=4)", "config['used_class_names']: class_id = config['used_class_names'][class_name] generator.get_append_annotations() #annotate and append annotations if config['visualize']: #visualize generator.visualize()", "0: # we only use frames from some steps env.render() #only render at", "= env_object.get_orientation() #objdim = env_object.get_cuboid_dimensions() box= BBox3D(objpos[0],objpos[1],objpos[2],objdim[0],objdim[1],objdim[2],objorient[3],objorient[0],objorient[1],objorient[2]) return box.p,box.center class GeneratorCoco: #COCO \"\"\"", "= config['num_objects_range'], used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], dataset =", "print(class_name) if self.too_small_obj: cv2.imshow('Too small object', mask) else: cv2.imshow('Labeled object', mask) cv2.waitKey(1000) print(self.data_dict['bbox'])", "return data_test, data_train, image_id def data_struct_image(self): #COCO \"\"\" Assign name to COCO dataset", "(dist) Data structure for testing data \"\"\" data_train, data_test = create_coco_json() return data_train,", "#env reset #random_robot randomizes the init position of robots #random_pos randomizes the init", "info to corresponding data dict \"\"\" data['images'].append(dict( file_name=name, height=im.shape[0], width=im.shape[1], id=image_id,)) def get_append_annotations(self):", "{\"images\": data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()} if len(data[\"images\"]) > 0: with open(os.path.join(folder,json_name), 'w') as", "segmentationPoly: (list) Segmentation converted to polynoms \"\"\" contours, _ = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)", "(dist) Testing data from preceding dataset generation in 
COCO data structure :return image_id:", "if flag == 'train': folder = config['output_train_folder'] data = data_train else: folder =", "1) << 24) + first_link_uid for x in range(env.robot.gripper_index, env.robot.num_joints + 1)]) #", "to COCO dataset image and train of test status Returns: :param data: (dict)", "config['num_episodes'])) for flag in ['test','train']: if flag == 'train': folder = config['output_train_folder'] data", "#visualize generator.visualize() #store dataset image and info generator.store_image_info() if config['make_dataset'] in [\"new\", \"resume\"]:", "generator.resume() elif (config['make_dataset'] == \"display\"): data_train, data_test = generator.init_data() generator.episode_zero() # the main", "print(\"episode: {}/{}\".format(episode, config['num_episodes'])) #env reset #random_robot randomizes the init position of robots #random_pos", "indent=4) def get_append_annotations(self): #DOPE cuboid_with_centroid = env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg = segmentationToCocoMask(img_mask, object_uid)", "structure for training data :return data_test: (dist) Data structure for testing data \"\"\"", "10) #points2=(points[0]+0.001,points[1]+0.001,points[2]+0.001) p.addUserDebugLine(data[\"objects\"][-1][\"box3D\"][points],data[\"objects\"][-1][\"box3D\"][points+1], lineColorRGB=(0.0, 0.0, 0.99), lineWidth = 4, lifeTime = 1) def", "config['output_folder'] + '/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True) os.makedirs(config[\"output_train_folder\"], exist_ok=True) #define objects to appear in the", "here). 
it's safe to have holes in the ids, just need to monothically", "x in range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc = list(boxc) projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid, camera_id)) projected_cuboid =", "x in config['used_class_names_quantity']: for _ in range(x[0]): used_objects.append(x[1]) if config['color_dict'] == 'object_colors': color_names_to_rgb()", "True, gui_on = config['gui_on'], show_bounding_boxes_gui = config['show_bounding_boxes_gui'], changing_light_gui = config['changing_light_gui'], shadows_on = config['shadows_on'],", "mask and bounding box coordinates for COCO annotated object \"\"\" mask = img_mask==object_uid", "data[t] = img print(\"Image {}/{}\".format(t, steps)) self.env.close() # main if __name__ == \"__main__\":", "data[\"annotations\"].clear() class GeneratorDope: #DOPE def __init__(self): self.object_settings = {\"exported_object_classes\": [], \"exported_objects\": []} def", "id ], type='instances', annotations=[# segmentation, area, iscrowd, image_id, bbox, category_id, id ], categories", "segmentation from RLE to polynoms ([[x1 y1 x2 x2 y2 ...]]). 
Code from", "# only supported format at the moment def get_env(self): \"\"\" Create environment for", "data_train, data_test = generator.init_data() generator.episode_zero() # the main loop for episode in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))),", "#DOPE image = im for projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)),", "objects vanishing when GUI is on print(\"Hard reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True, hard=False) observation", "object of class {} with area={} in img {}'.format(self.too_small_obj, self.data_dict['area'], name)) def visualize(self):", "for f in files: os.remove(f) files = glob.glob(os.path.join(config['output_train_folder'],'./*')) for f in files: os.remove(f)", "0, -9.81) return env def episode_zero(self): \"\"\" Initial espisode set-up \"\"\" self.id_unique =", "import RandomizedEnvWrapper import pkg_resources # config, specify here or pass as an input", "= config['shadows_on'], color_dict = config['color_dict'], object_sampling_area = config['object_sampling_area'], num_objects_range = config['num_objects_range'], used_objects =", "for _ in range(x[0]): used_objects.append(x[1]) if config['color_dict'] == 'object_colors': color_names_to_rgb() config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\") #write", "dataset config file Returns: :return env: (object) Environment for dataset generation \"\"\" env", "from myGym import envs from matplotlib.pyplot import imshow, show import cv2 import numpy", "poly segmentation format bitmap = mask.decode(seg) seg = _segmentationToPoly(bitmap) self.too_small_obj = False try:", "data dictionary :param name: (string) Name of image file for saving \"\"\" data", "seg = _segmentationToPoly(bitmap) self.too_small_obj = False try: #notify and skip 
the object with", "annotations to output directory \"\"\" if config['make_dataset'] in [\"new\", \"resume\"]: print(\"Storing annotations.json at", "'img_{}_cam{}.json'.format(image_id, camera_id) json_dict = {\"images\": data[\"images\"], \"type\":'instances',\"annotations\": data[\"annotations\"], \"categories\":_category_coco_format()} if len(data[\"images\"]) > 0:", "tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])), 4, [0,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0], -1)", "= config['output_folder'] + '/test' config['output_train_folder'] = config['output_folder'] + '/train' os.makedirs(config[\"output_test_folder\"], exist_ok=True) os.makedirs(config[\"output_train_folder\"], exist_ok=True)", "sys.argv[1]) with open(config_path) as file: config = commentjson.load(file) # initialize dataset generator if", "np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask) #merge gripper links img_mask = np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge robot", "intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size, )) if config['make_dataset'] in [\"new\", \"resume\"]: filename = \"_camera_settings\" + '.json'", "env_object.get_name() else: continue if class_name in config['used_class_names']: class_id = config['used_class_names'][class_name] generator.get_append_annotations() #annotate and", "in range(int(image_id/(config['num_steps']*np.count_nonzero(config['active_cameras']))), config['num_episodes']): #loop through episodes print(\"episode: {}/{}\".format(episode, config['num_episodes'])) #env reset #random_robot randomizes", "self.imsize, self.imsize, 3), dtype='f') for t in range(steps): self.env.reset(random_pos=True) self.env.render() action = [random.uniform(1,2)", "\"\"\" Visualize 
mask and bounding box coordinates for COCO annotated object \"\"\" mask", "image file for saving \"\"\" data = data_test if isTestSample == True else", "projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [255,0,0], -1) image =", "import json import commentjson import sys import random from pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult", "\"utf-8\") #utf-8 format in str bbox = mask.toBbox(seg).flatten().tolist() bounding_box = {'top_left': bbox[:2], 'bottom_right':", "environment for VAE dataset generation according to dataset config file \"\"\" self.env =", "f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4) filename =", "data from preceding dataset generation in COCO data structure :return data_test: (dist) Testing", "config['camera_resolution'][0], \"height\": config['camera_resolution'][1]} self.camera[\"camera_settings\"].append(dict( name=\"camera\" + str(camera_id), intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size, )) if config['make_dataset'] in", "at episode {} of {}.\".format(filename, name, episode, config['num_episodes'])) with open(os.path.join(path, filename), 'w') as", "x in os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")] image_id = max(max(files_test),max(files_train)) print(\"Resuming from image_id {} for", "episode == 0: # generator.episode_zero() if episode % config['num_episodes_hard_reset'] == 0: #to prevent", "for COCO annotated object \"\"\" mask = img_mask==object_uid mask = np.expand_dims(mask, axis=2) mask", "steps # randomize the movements of robots (using joint control) action = env.action_space.sample()", "corresponding data dict \"\"\" data['images'].append(dict( file_name=name, height=im.shape[0], width=im.shape[1], id=image_id,)) def get_append_annotations(self): #COCO \"\"\"", 
"store_image_info(self): #DOPE #write dataset image info filename = str(image_id) + '.json' if config['make_dataset']", "\"box3D\": box3D, \"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict) def visualize(self): #DOPE image =", "in range(config['num_steps']): #loop through steps # randomize the movements of robots (using joint", "#loop through episodes print(\"episode: {}/{}\".format(episode, config['num_episodes'])) #env reset #random_robot randomizes the init position", "as f: json.dump(json_dict, f, indent=4) # clear data and continue data[\"images\"].clear() data[\"annotations\"].clear() class", "happen for small numbers try: with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f: data_train = json.load(f)", "camera_resolution = config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0,", "data dict \"\"\" data['images'].append(dict( file_name=name, height=im.shape[0], width=im.shape[1], id=image_id,)) def get_append_annotations(self): #COCO \"\"\" Make", "#identify merged objects in the camera view (in the image) #prepare data strucuture", "1) << 24) + first_link_uid for x in range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids = np.array([((x", "tuple(map(int, projected_cuboid_point)), 4, [0,255,0], -1) for projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int,", "bounding_box = {'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc = create_3D_box(env_object,self.objdim[class_name]) box3D = []", "in the scene \"\"\" seg = segmentationToCocoMask(img_mask,object_uid) area = float(mask.area(seg)) bbox = mask.toBbox(seg).flatten().tolist()", "config file \"\"\" self.env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = 
config['robot'], render_on = True, gui_on", "generator.episode_zero() if episode % config['num_episodes_hard_reset'] == 0: #to prevent objects vanishing when GUI", "return data_train, data_test def resume(self): #COCO \"\"\" Resume COCO dataset generation Returns: :return", "img_mask = np.where(np.isin(img_mask,gripper_uids), gripper_uids[0], img_mask) #merge gripper links img_mask = np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask)", "#to prevent objects vanishing when GUI is on print(\"Hard reset!!!\") env.reset(hard=True) env.reset(random_robot=config['random_arm_movement'], random_pos=True,", "class_name in config['used_class_names']: class_id = config['used_class_names'][class_name] generator.get_append_annotations() #annotate and append annotations if config['visualize']:", "for projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [0,255,0], -1) for", "\"\"\" Create COCO json data structure Returns: :return data_train: (dict) Data structure for", "small to be realistically seen print('Too small object of class {} with area={}", "{\"camera_settings\": []} for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] intrinsic_settings =", "= json.load(f) except: pass #happens when test JSON is empty, which can happen", "config_path = pkg_resources.resource_filename(\"myGym\", sys.argv[1]) with open(config_path) as file: config = commentjson.load(file) # initialize", "and info generator.store_image_info() if config['make_dataset'] in [\"new\", \"resume\"]: cv2.imwrite(os.path.join(path, name), im, [int(cv2.IMWRITE_JPEG_QUALITY), 70])", "Environment for dataset generation \"\"\" env = RandomizedEnvWrapper(env=gym.make(config['env_name'], robot = config['robot'], render_on =", "{} with area={} in img {}'.format(self.too_small_obj, self.data_dict['area'], name)) def 
visualize(self): #COCO \"\"\" Visualize", "steps: (int) Number of episodes initiated during dataset generation \"\"\" data = np.zeros((steps,", "+ str(camera_id), intrinsic_settings=intrinsic_settings, captured_image_size=captured_image_size, )) if config['make_dataset'] in [\"new\", \"resume\"]: filename = \"_camera_settings\"", "= dict( id=self.id_unique, image_id=image_id, category_id=class_id, segmentation=seg, area=area, bbox=bbox, iscrowd=0, ) if self.too_small_obj ==", "self.data_dict = { \"class\": class_name, \"class_id\": class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\":", "sys import random from pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult from pycocotools import mask import", "'vae'!\") #prepare directories config['output_test_folder'] = config['output_folder'] + '/test' config['output_train_folder'] = config['output_folder'] + '/train'", "if len(data[\"images\"]) > 0: with open(os.path.join(folder,json_name), 'w') as f: json.dump(json_dict, f, indent=4) #", "str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img) data[t] = img print(\"Image {}/{}\".format(t, steps)) self.env.close() # main", "(dict) Corresponding data dictionary :param name: (string) Name of image file for saving", "= config['gui_on'], show_bounding_boxes_gui = config['show_bounding_boxes_gui'], changing_light_gui = config['changing_light_gui'], shadows_on = config['shadows_on'], color_dict =", "else data_train name = 'img_{}_cam{}.jpg'.format(image_id,camera_id) return data, name def store_image_info(self): #COCO \"\"\" Append", "commentjson import sys import random from pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult from pycocotools import", "range(x[0]): used_objects.append(x[1]) if config['color_dict'] == 'object_colors': color_names_to_rgb() 
config['texture_randomizer']['exclude'].append(\"objects\") config['color_randomizer']['exclude'].append(\"objects\") #write config.json to output_folder", "\"new\": #cleanup files files = glob.glob(os.path.join(config['output_test_folder'],'./*')) for f in files: os.remove(f) files =", "projected_cuboid_point)), 4, [0,255,0], -1) for projected_cuboid_point in data[\"objects\"][-1][\"projected_3DBB\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)),", "Used for vision training. import gym from myGym import envs from matplotlib.pyplot import", "config['active_cameras'], camera_resolution = config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0,", "map id->name (just to pretty print) inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj = inv_map[class_id]", "except: #make inverse map id->name (just to pretty print) inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys()))", "test/train data? 
path = config['output_test_folder'] if isTestSample == True else config['output_train_folder'] #get dataset", "self.data_dict['area'], name)) def visualize(self): #COCO \"\"\" Visualize mask and bounding box coordinates for", "colors to objects by name as specified in the training config file \"\"\"", "length encoding RLE segmentation format seg['counts'] = str(seg['counts'], \"utf-8\") #utf-8 format in str", "== object_uid, env_objects)) if len(env_object_list) > 0: env_object = env_object_list[0] class_name = env_object.get_name()", "from some steps env.render() #only render at the steps/frames we use for dataset", "data, name def store_image_info(self): #COCO \"\"\" Append COCO dataset image info to corresponding", "mask = np.expand_dims(mask, axis=2) mask = 255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1) print(class_name) if self.too_small_obj: cv2.imshow('Too", "[data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])), 4, [255,255,0], -1) print(class_name) cv2.imshow('image',image) cv2.waitKey(1000) self.draw_bounding_box_3D() def draw_bounding_box_3D(self): #DOPE for points", "config['visualize']: #visualize generator.visualize() #store dataset image and info generator.store_image_info() if config['make_dataset'] in [\"new\",", "json.load(f) except: pass #happens when test JSON is empty, which can happen for", "= np.array([((x + 1) << 24) + first_link_uid for x in range(-1, env.robot.gripper_index)],dtype=np.int32)", "format bitmap = mask.decode(seg) seg = _segmentationToPoly(bitmap) self.too_small_obj = False try: #notify and", "mask = img_mask==object_uid mask = np.expand_dims(mask, axis=2) mask = 255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1) print(class_name)", "in config: use one of 'coco', 'dope', 'vae'!\") #prepare directories config['output_test_folder'] = config['output_folder']", "class_name, \"segmentation_class_id\": class_id, \"cuboid_dimensions\": 
self.objdim[class_name] }) self.data_dict = { \"class\": class_name, \"class_id\": class_id,", "[] for x in config['used_class_names_quantity']: for _ in range(x[0]): used_objects.append(x[1]) if config['color_dict'] ==", "= dict( images=[# file_name, height, width, id ], type='instances', annotations=[# segmentation, area, iscrowd,", "255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1) print(class_name) if self.too_small_obj: cv2.imshow('Too small object', mask) else: cv2.imshow('Labeled object',", "def get_env(self): #COCO \"\"\" Create environment for COCO dataset generation according to dataset", "import gym from myGym import envs from matplotlib.pyplot import imshow, show import cv2", "dataset for c in range(np.count_nonzero(config['active_cameras'])): #loop through active cameras camera_id=np.nonzero(config['active_cameras'])[0][c] image_id = image_id", "), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) return env def episode_zero(self): self.objdim =", "_segmentationToPoly(mask, ): \"\"\" Convert segmentation from RLE to polynoms ([[x1 y1 x2 x2", "from pycocotools.cocostuffhelper import segmentationToCocoMask, segmentationToCocoResult from pycocotools import mask import pybullet as p", "episode {} of {}.\".format(episode, config['num_episodes'])) for flag in ['test','train']: if flag == 'train':", "np.expand_dims(mask, axis=2) mask = 255*mask.astype('uint8') cv2.imshow('image',im) cv2.waitKey(1) print(class_name) if self.too_small_obj: cv2.imshow('Too small object',", "class_name, \"class_id\": class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\": bounding_box, \"cuboid\":", "commentjson.load(file) # initialize dataset generator if config['dataset_type'] == 'coco': generator = GeneratorCoco() elif", "safe to have holes in the ids, just need to 
monothically increase self.id_unique", "generation in COCO data structure :return image_id: (int) ID of last generated image", "finished. Ready to train!\") raise SystemExit(0) # initialize pybullet env env = generator.get_env()", "data = data_train else: folder = config['output_test_folder'] data = data_test json_name = 'img_{}_cam{}.json'.format(image_id,", "CONFIG_DEFAULT = pkg_resources.resource_filename(\"myGym\", 'configs/dataset_coco.json') # helper functions: def color_names_to_rgb(): \"\"\" Assign RGB colors", "3), dtype='f') for t in range(steps): self.env.reset(random_pos=True) self.env.render() action = [random.uniform(1,2) for x", "structure for testing data \"\"\" data_train, data_test = create_coco_json() return data_train, data_test def", "in value: new_value.append(clr[item]) new_dict[key] = new_value config['color_dict'] = new_dict def _category_coco_format(): #COCO \"\"\"", "#area ok data['annotations'].append(self.data_dict) #append annotations self.id_unique +=1 else: #area too small to be", "projected_cuboid, \"box3D\": box3D, \"projected_3DBB\": projected_3DBB, \"projected_3DBB_centroid\": projected_3DBB_centroid, } data[\"objects\"].append(self.data_dict) def visualize(self): #DOPE image", ") data_test = dict( images=[# file_name, height, width, id ], type='instances', annotations=[# segmentation,", "config_path = CONFIG_DEFAULT print('No config.json passed in argument. Loading default config: {}'.format(CONFIG_DEFAULT)) else:", "config['train_test_split_pct'] # bool, test/train data? 
path = config['output_test_folder'] if isTestSample == True else", "dataset generation\") data_test, data_train, image_id = generator.resume() elif (config['make_dataset'] == \"display\"): data_train, data_test", "open(os.path.join(folder,json_name), 'w') as f: json.dump(json_dict, f, indent=4) # clear data and continue data[\"images\"].clear()", "cv2 import numpy as np import os import glob import json import commentjson", "len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], random_pos=False) observation = env.get_observation() env_objects = observation[\"objects\"] for obj in env_objects:", "t in range(steps): self.env.reset(random_pos=True) self.env.render() action = [random.uniform(1,2) for x in range(6)] #action", "name = padding * \"0\" + str(t+7999) cv2.imwrite(os.path.join(dataset_pth, \"img_{}.png\".format(name)), img) data[t] = img", "try: with open(os.path.join(config['output_train_folder'],'annotations.json'), 'r') as f: data_train = json.load(f) # get the ID", "f: json.dump(self.object_settings, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4)", "observation = env.get_observation() env_objects = observation[\"objects\"] for obj in env_objects: if obj.name not", "'w') as f: json.dump(self.object_settings, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.object_settings,", "image in preceding dataset generation \"\"\" try: with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f: data_test", "Initialize data structures for COCO dataset annotations Returns: :return data_train: (dict) Data structure", "os.listdir(config['output_test_folder']) if x.endswith(\".jpg\")] files_train = [int(x.replace('.jpg','')) for x in os.listdir(config['output_train_folder']) if x.endswith(\".jpg\")] image_id", "CONFIG_DEFAULT print('No config.json passed in argument. 
Loading default config: {}'.format(CONFIG_DEFAULT)) else: config_path =", "dictionary :param name: (string) Name of image file for saving \"\"\" data =", "json.load(f) # get the ID of last image img_ids = [img[\"id\"] for img", "# write JSON annotations every n periods or at the end if episode", "dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81) def collect_data(self, steps):", "config['active_cameras'], camera_resolution = config['camera_resolution'], dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0,", "generation Returns: :return data_train: (dict) Training data from preceding dataset generation in COCO", "+ '.json' print(\"Storing {}.\".format(filename)) with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.camera, f, indent=4)", "print(\"Resuming from image_id {} for episodes: {}\".format(image_id, config['num_episodes'])) self.episode_zero() except FileNotFoundError: image_id =", "visualize(self): #DOPE image = im for projected_cuboid_point in data[\"objects\"][-1][\"projected_cuboid\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int,", "COCO annotations for each object in the scene \"\"\" seg = segmentationToCocoMask(img_mask,object_uid) area", "data[\"objects\"][-1][\"projected_3DBB\"]: image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int,", "= env.robot.robot_uid robot_uids = np.array([((x + 1) << 24) + first_link_uid for x", "config: use one of 'coco', 'dope', 'vae'!\") #prepare directories config['output_test_folder'] = config['output_folder'] +", "name)) def visualize(self): #COCO \"\"\" Visualize mask and bounding box coordinates for COCO", "= [] for x in range(boxp.shape[0]): box3D.append(tuple(boxp[x])) boxc = list(boxc) projected_cuboid_centroid = list(env.project_point_to_camera_image(cuboid_centroid,", "4, 
[0,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_3DBB_centroid\"][0],data[\"objects\"][-1][\"projected_3DBB_centroid\"][1]])), 4, [255,0,0], -1) image =", "image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][0],data[\"objects\"][-1][\"bounding_box\"][\"top_left\"][1]])), 4, [255,255,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][0],data[\"objects\"][-1][\"bounding_box\"][\"bottom_right\"][1]])),", "class for image dataset for VAE vision model training \"\"\" def __init__(self): self.object_settings", "image = cv2.circle(cv2.UMat(image), tuple(map(int, projected_cuboid_point)), 4, [255,0,0], -1) image = cv2.circle(cv2.UMat(image), tuple(map(int, [data[\"objects\"][-1][\"projected_cuboid_centroid\"][0],data[\"objects\"][-1][\"projected_cuboid_centroid\"][1]])),", "as f: json.dump(self.camera, f, indent=4) with open(os.path.join(config['output_train_folder'], filename), 'w') as f: json.dump(self.camera, f,", "[random.uniform(1,2) for x in range(6)] #action = [2,2,2,2,2,2] self.env.robot.reset_random(action) # send the Kuka", "f: json.dump(data, f, indent=4) def get_append_annotations(self): #DOPE cuboid_with_centroid = env_object.get_bounding_box() cuboid_centroid=cuboid_with_centroid[8] cuboid=cuboid_with_centroid[:8] seg", "== 0: #to prevent objects vanishing when GUI is on print(\"Hard reset!!!\") env.reset(hard=True)", "image_id=image_id, category_id=class_id, segmentation=seg, area=area, bbox=bbox, iscrowd=0, ) if self.too_small_obj == False: #area ok", "open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f: data_test = json.load(f) except: pass #happens when test JSON", "scene \"\"\" seg = segmentationToCocoMask(img_mask,object_uid) area = float(mask.area(seg)) bbox = mask.toBbox(seg).flatten().tolist() #1 run", "img_mask) #merge gripper links img_mask = 
np.where(np.isin(img_mask,robot_uids), robot_uids[0], img_mask) #merge robot links obj_ids", "generation \"\"\" try: with open(os.path.join(config['output_test_folder'],'annotations.json'), 'r') as f: data_test = json.load(f) except: pass", "if isTestSample == True else config['output_train_folder'] #get dataset image and its mask im", "len(data[\"images\"]) > 0: with open(os.path.join(folder,json_name), 'w') as f: json.dump(json_dict, f, indent=4) # clear", "config['num_episodes'])) except FileNotFoundError: image_id = 0 return data_test, data_train, image_id def data_struct_image(self): #COCO", "x in range(-1, env.robot.gripper_index)],dtype=np.int32) gripper_uids = np.array([((x + 1) << 24) + first_link_uid", "episodes print(\"episode: {}/{}\".format(episode, config['num_episodes'])) #env reset #random_robot randomizes the init position of robots", "view (in the image) #prepare data strucuture data, name = generator.data_struct_image() for object_uid", "= config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL, dataset = True, ), config_path = config['output_folder']+'/config_dataset.json') p.setGravity(0, 0, -9.81)", "randomizes the init position of robots #random_pos randomizes the init positions of objects", "it's safe to have holes in the ids, just need to monothically increase", "inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj = inv_map[class_id] self.data_dict = dict( id=self.id_unique, image_id=image_id, category_id=class_id,", "# initialize pybullet env env = generator.get_env() first_link_uid = env.robot.robot_uid robot_uids = np.array([((x", "train of test status Returns: :param data: (dict) Corresponding data dictionary :param name:", "num_objects_range = config['num_objects_range'], used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], renderer=p.ER_BULLET_HARDWARE_OPENGL,", "Returns: :return data_train: (dict) 
Data structure for training data :return data_test: (dist) Data", "box.p,box.center class GeneratorCoco: #COCO \"\"\" Generator class for COCO image dataset for YOLACT", "num_objects_range = config['num_objects_range'], used_objects = used_objects, active_cameras = config['active_cameras'], camera_resolution = config['camera_resolution'], dataset", "= generator.init_data() generator.episode_zero() elif config['make_dataset'] == 'resume': #resume print(\"Restoring dataset generation\") data_test, data_train,", "to pretty print) inv_map = dict(zip(config['used_class_names'].values(), config['used_class_names'].keys())) self.too_small_obj = inv_map[class_id] self.data_dict = dict(", "\"class_id\": class_id, \"location\":env_object.get_position(), \"quaternion_xyzw\": env_object.get_orientation(), \"cuboid_centroid\": cuboid_centroid, \"projected_cuboid_centroid\": projected_cuboid_centroid, \"bounding_box\": bounding_box, \"cuboid\": cuboid,", "self.env.step(action) img = observation['camera_data'][6]['image'] imgs = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) img = cv2.resize(imgs[0:450,100:500], (self.imsize, self.imsize))", "of objects # if episode == 0: # generator.episode_zero() if episode % config['num_episodes_hard_reset']", "to train!\") raise SystemExit(0) # initialize pybullet env env = generator.get_env() first_link_uid =", "and append COCO annotations for each object in the scene \"\"\" seg =", "f: json.dump(self.object_settings, f, indent=4) class GeneratorVae: \"\"\" Generator class for image dataset for", "'.json' with open(os.path.join(config['output_test_folder'], filename), 'w') as f: json.dump(self.object_settings, f, indent=4) with open(os.path.join(config['output_train_folder'], filename),", "generator = GeneratorDope() elif config['dataset_type'] == 'vae': generator = GeneratorVae() else: raise Exception(\"dataset_type", "len(self.objdim.keys()) < len(config['used_class_names'].keys()): env.reset(random_robot=config['random_arm_movement'], 
random_pos=False) observation = env.get_observation() env_objects = observation[\"objects\"] for obj", "mask.toBbox(seg).flatten().tolist() bounding_box = {'top_left': bbox[:2], 'bottom_right': [bbox[0]+bbox[2], bbox[1]+bbox[3]]} boxp,boxc = create_3D_box(env_object,self.objdim[class_name]) box3D =", "np.zeros((steps, self.imsize, self.imsize, 3), dtype='f') for t in range(steps): self.env.reset(random_pos=True) self.env.render() action =", "+1 # +1 if last sample were test (thus not in here). it's", "_ = cv2.findContours((mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) segmentationPoly = [] for contour in contours: contour", "dict \"\"\" data['images'].append(dict( file_name=name, height=im.shape[0], width=im.shape[1], id=image_id,)) def get_append_annotations(self): #COCO \"\"\" Make and" ]
[ "self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression ])", "[] while len(assets_unsorted) > 0: acyclic = False for asset in assets_unsorted: for", "import get_file_hash, save_file, load_file import shutil import io from .compiler import ExpressionProcessor from", "ExpressionProcessor from .expressions import stylesheets, scripts, html import subprocess import tempfile class AssetCollection(object):", "def _get_source_dir(self): return self._settings.stylesheets.source def _get_target_dir(self): return self._settings.stylesheets.target def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True))", "html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp()", "get_asset_objects(path, settings): asset_classes = [ ImageAsset, FontAsset, StylesheetAsset, HtmlAsset, ScriptAsset ] file_ext =", "= '{path}' else: t = '{path} ({lang})' common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) return", "and self._lang != other._lang def parse(self): self._parse() def dependencies_modified(self): for dep_asset in self._dependencies:", "subprocess.Popen( [ \"java\", \"-Xss100m\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"css\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE,", "path self._lang = lang self._collection = None self._settings = None self._dependencies = []", "ExpressionProcessor(self, [ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file", "if self._extension == '.coffee': if self._settings.verbose: 
print('Using CoffeeScript Compiler for {asset}'.format(asset=self)) self.compile_coffee() if", "] file_ext = os.path.splitext(path)[1] for asset_class in asset_classes: if file_ext in asset_class.supported_extensions(): langs", "= '%s%s' % (parts[0], new_ext) if 'lang' in opts and not(opts['lang'] is None):", "get_languages(settings): return settings.html.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'html') def _get_source_dir(self): return self._settings.html.source def", "_get_partials_dir(self): return os.path.join(self._settings.partials, 'images') def _get_source_dir(self): return self._settings.images.source def _get_target_dir(self): return self._settings.images.target class", "from .expressions import stylesheets, scripts, html import subprocess import tempfile class AssetCollection(object): def", "path_part = '%s-%s%s' % (parts[0], lang, parts[1]) if self.is_partial(path_part): target_path = os.path.join(self._get_partials_dir(), path_part)", "is None: continue self._assets.append(res) self._assets[-1]._collection = self self._assets[-1]._settings = settings def find_asset(self, path,", "__ne__(self, other): return self._path != other._path and self._lang != other._lang def parse(self): self._parse()", "\"java\", \"-Xss100m\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"css\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, )", "get_languages(settings): return settings.stylesheets.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'stylesheets') def _get_source_dir(self): return self._settings.stylesheets.source def", "def __init__(self, path, lang=None): super(ImageAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.png', '.jpg',", "asset_class.get_languages(settings) if langs is None: return asset_class(path, None) else: return [asset_class(path, lang) for", "self._settings.verbose: print('Minifying 
{asset}'.format(asset=self)) self.minify() self.save(target_path) class BinaryAsset(Asset): def __init__(self, path, lang=None): super(BinaryAsset, self).__init__(Asset.FILE,", "@staticmethod def supported_extensions(): return ['.js', '.coffee'] @staticmethod def get_languages(settings): return settings.scripts.languages def _get_partials_dir(self):", "CacheEntry from .utils import get_file_hash, save_file, load_file import shutil import io from .compiler", "def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.css\") save_file(source_file, self._data) target_file =", "opts: parts = os.path.splitext(path_part) new_filename = '%s-%s' % (parts[0], opts['hash']) path_part = '%s%s'", "lang) @staticmethod def supported_extensions(): return ['.png', '.jpg', '.gif'] @staticmethod def get_languages(settings): return settings.images.languages", "return self._settings.images.source def _get_target_dir(self): return self._settings.images.target class FontAsset(BinaryAsset): def __init__(self, path, lang=None): super(FontAsset,", "self._settings.images.target class FontAsset(BinaryAsset): def __init__(self, path, lang=None): super(FontAsset, self).__init__(path, lang) @staticmethod def supported_extensions():", "%s\" % path) def __eq__(self, other): return self._path == other._path and self._lang ==", "self._lang = lang self._collection = None self._settings = None self._dependencies = [] self._tool_cache", "def is_partial(self, path): return os.path.basename(path).startswith(\"_\") def get_target_path(self, **opts): common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()])", "_get_source_dir(self): return self._settings.stylesheets.source def _get_target_dir(self): return self._settings.stylesheets.target def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def", "def _get_target_path(self): return self.get_target_path(lang=self._lang) def _parse(self): self.load() 
self._processor = ExpressionProcessor(self, [ html.IncludeExpression, html.StylesheetUrlExpression,", "class BinaryAsset(Asset): def __init__(self, path, lang=None): super(BinaryAsset, self).__init__(Asset.FILE, path, lang) def _get_target_path(self): return", "class FontAsset(BinaryAsset): def __init__(self, path, lang=None): super(FontAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return", "if self._settings.verbose: print('Cached {asset}'.format(asset=self)) else: print(\"String asset\") class TextAsset(Asset): def __init__(self, path, lang=None):", "= load_file(target_file) shutil.rmtree(temp_path) def compile_coffee(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.coffee\") save_file(source_file,", "!= other._path and self._lang != other._lang def parse(self): self._parse() def dependencies_modified(self): for dep_asset", "self._settings = None self._dependencies = [] self._tool_cache = Cache() self._flag_modified = False def", "get_target_path(self, **opts): common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) path_part = self._path[len(common_prefix)+1:] if 'hash' in", "return self._settings.scripts.source def _get_target_dir(self): return self._settings.scripts.target def _get_target_path(self): return self.get_target_path( hash=get_file_hash(self._path, unique=True), change_extension='.js'", "self.is_partial(path_part): target_path = os.path.join(self._get_partials_dir(), path_part) else: target_path = os.path.join(self._get_target_dir(), path_part) return target_path def", "load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._extension == '.coffee': if self._settings.verbose:", "if self._resource_type == Asset.FILE: cache_entry = self._tool_cache.find_entry(self._path, self._lang) file_modified = True if cache_entry", "import tempfile class AssetCollection(object): def 
__init__(self, file_list, settings): self._assets = [] self._settings =", "= True else: if self._settings.verbose: print('Cached {asset}'.format(asset=self)) else: print(\"String asset\") class TextAsset(Asset): def", "None split = os.path.splitext(path) self._basename = split[0] self._extension = split[1] def load(self): with", "]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.html\") save_file(source_file, self._data)", "lang=None): super(BinaryAsset, self).__init__(Asset.FILE, path, lang) def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): pass", "subprocess.Popen( [ \"java\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"js\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE,", "self._assets = DependencyResolver.topological_sort(self._assets) if self._settings.verbose: print('Build order:\\n{collection}\\n'.format( collection=self._assets)) def build(self): print('Building assets...') for", "stdin=subprocess.PIPE, ) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def compile_coffee(self): temp_path", "tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.coffee\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"source.js\") proc =", "= os.path.commonprefix([ self._path, self._get_source_dir()]) return t.format(path=self._path[len(common_prefix) + 1:], lang=self._lang) def add_dependency(self, path, lang=None):", "in res: self._assets.append(asset) self._assets[-1]._collection = self self._assets[-1]._settings = settings else: if res is", "'scripts') def _get_source_dir(self): return self._settings.scripts.source def _get_target_dir(self): return self._settings.scripts.target def _get_target_path(self): return self.get_target_path(", "self._processor = ExpressionProcessor(self, [ 
html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression ])", "return True return False def compile(self, force=False): if self._resource_type == Asset.FILE: cache_entry =", "path): return os.path.basename(path).startswith(\"_\") def get_target_path(self, **opts): common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) path_part =", "return settings.html.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'html') def _get_source_dir(self): return self._settings.html.source def _get_target_dir(self):", "= tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.html\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.html\") proc", "def _get_partials_dir(self): return os.path.join(self._settings.partials, 'scripts') def _get_source_dir(self): return self._settings.scripts.source def _get_target_dir(self): return self._settings.scripts.target", "_get_partials_dir(self): return os.path.join(self._settings.partials, 'scripts') def _get_source_dir(self): return self._settings.scripts.source def _get_target_dir(self): return self._settings.scripts.target def", "def save(self, path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path, self._data) class StylesheetAsset(TextAsset): @staticmethod def", "else: print(\"Couldn't find dependency with path %s\" % path) def __eq__(self, other): return", "def _get_partials_dir(self): return os.path.join(self._settings.partials, 'html') def _get_source_dir(self): return self._settings.html.source def _get_target_dir(self): return self._settings.html.target", "+ 1:], lang=self._lang) def add_dependency(self, path, lang=None): dependency = self._collection.find_asset(path, lang) if dependency:", "dependency occurred') return assets_sorted class 
Asset(object): FILE = 0 STRING = 1 def", "ExpressionProcessor(self, [ html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression ]) self._processor.parse() def", "save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.css\") proc = subprocess.Popen( [ \"java\", \"-Xss100m\", \"-jar\",", "def __ne__(self, other): return self._path != other._path and self._lang != other._lang def parse(self):", "= os.path.splitext(path) self._basename = split[0] self._extension = split[1] def load(self): with io.open(self._path, 'r',", "for {asset}'.format(asset=self)) self.compile_coffee() if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify()", "self._collection = None self._settings = None self._dependencies = [] self._tool_cache = Cache() self._flag_modified", "== Asset.FILE: cache_entry = self._tool_cache.find_entry(self._path, self._lang) file_modified = True if cache_entry is None\\", "topological_sort(assets_unsorted): assets_sorted = [] while len(assets_unsorted) > 0: acyclic = False for asset", "return os.path.join(self._settings.partials, 'stylesheets') def _get_source_dir(self): return self._settings.stylesheets.source def _get_target_dir(self): return self._settings.stylesheets.target def _get_target_path(self):", "None def pick_dependencies(self): print('Found {count:d} assets'.format(count=len(self._assets))) if self._settings.verbose: print(\"Picking dependencies...\") for asset in", "cache_entry.file_modified() or self.dependencies_modified() if file_modified or force: if cache_entry: if os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path", "def supported_extensions(): return ['.eot', '.svg', '.ttf', '.woff'] @staticmethod def get_languages(settings): return 
settings.fonts.languages def", "{asset}'.format(asset=self)) self.minify() self.save(target_path) class BinaryAsset(Asset): def __init__(self, path, lang=None): super(BinaryAsset, self).__init__(Asset.FILE, path, lang)", "(parts[0], lang, parts[1]) if self.is_partial(path_part): target_path = os.path.join(self._get_partials_dir(), path_part) else: target_path = os.path.join(self._get_target_dir(),", "minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.js\") save_file(source_file, self._data) target_file = os.path.join(temp_path,", "collection=self._assets)) def build(self): print('Building assets...') for asset in self._assets: asset.compile(force=self._settings.force) print('Build done.') class", "langs = asset_class.get_languages(settings) if langs is None: return asset_class(path, None) else: return [asset_class(path,", "self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class ScriptAsset(TextAsset): @staticmethod def supported_extensions(): return ['.js', '.coffee']", "target_file = os.path.join(temp_path, \"source.js\") proc = subprocess.Popen( [ self._settings.coffee_bin, \"-c\", source_file ], stdout=subprocess.PIPE,", "change_extension='.js' ) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression", "opts['change_extension'] parts = os.path.splitext(path_part) path_part = '%s%s' % (parts[0], new_ext) if 'lang' in", "= ExpressionProcessor(self, [ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp()", "print('Created {asset}'.format(asset=self)) self._flag_modified = True else: if self._settings.verbose: print('Cached {asset}'.format(asset=self)) else: print(\"String asset\")", "as f: self._data = f.read() def 
save(self, path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path,", "if dependency: if dependency not in self._dependencies: self._dependencies.append(dependency) else: print(\"Couldn't find dependency with", "= subprocess.Popen( [ \"java\", \"-Xss100m\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"css\", \"-o\", target_file, source_file ],", "settings.scripts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'scripts') def _get_source_dir(self): return self._settings.scripts.source def _get_target_dir(self): return", "os.path.join(temp_path, \"source.js\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.js\") proc = subprocess.Popen( [ \"java\",", "super(BinaryAsset, self).__init__(Asset.FILE, path, lang) def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): pass def", "def __init__(self, resource_type, path, lang): self._resource_type = resource_type self._path = path self._lang =", "super(FontAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.eot', '.svg', '.ttf', '.woff'] @staticmethod def", "self._assets = [] self._settings = settings for path in file_list: res = get_asset_objects(path,", "and not(opts['lang'] is None): lang = opts['lang'] parts = os.path.splitext(path_part) path_part = '%s-%s%s'", "cache_entry: cache_entry.target = target_path self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self)) else: cache_entry = CacheEntry(self._path, target_path, self._lang)", "super(TextAsset, self).__init__(Asset.FILE, path, lang) self._data = None split = os.path.splitext(path) self._basename = split[0]", "return ['.eot', '.svg', '.ttf', '.woff'] @staticmethod def get_languages(settings): return settings.fonts.languages def _get_partials_dir(self): return", ".utils import get_file_hash, save_file, load_file import 
shutil import io from .compiler import ExpressionProcessor", "source_file ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def", "path, lang=None): dependency = self._collection.find_asset(path, lang) if dependency: if dependency not in self._dependencies:", "ScriptAsset(TextAsset): @staticmethod def supported_extensions(): return ['.js', '.coffee'] @staticmethod def get_languages(settings): return settings.scripts.languages def", "self._assets[-1]._collection = self self._assets[-1]._settings = settings else: if res is None: continue self._assets.append(res)", "if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class BinaryAsset(Asset):", "shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._extension == '.coffee': if self._settings.verbose: print('Using", "stderr=subprocess.PIPE) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings,", "= proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._extension", "err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if", "stdout=subprocess.PIPE, stdin=subprocess.PIPE ) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self,", "_compile(self, target_path): if not os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path) class ImageAsset(BinaryAsset): def __init__(self, path,", "asset in 
self._assets: asset.parse() if self._settings.verbose: print(asset) print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets = DependencyResolver.topological_sort(self._assets)", "= path self._lang = lang self._collection = None self._settings = None self._dependencies =", "source_file = os.path.join(temp_path, \"source.js\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.js\") proc = subprocess.Popen(", "settings.images.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'images') def _get_source_dir(self): return self._settings.images.source def _get_target_dir(self): return", "_get_source_dir(self): return self._settings.images.source def _get_target_dir(self): return self._settings.images.target class FontAsset(BinaryAsset): def __init__(self, path, lang=None):", "if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class HtmlAsset(TextAsset):", "False def is_partial(self, path): return os.path.basename(path).startswith(\"_\") def get_target_path(self, **opts): common_prefix = os.path.commonprefix([ self._path,", "for dependency in asset._dependencies: if dependency in assets_unsorted: break else: acyclic = True", "return self._settings.fonts.target def get_asset_objects(path, settings): asset_classes = [ ImageAsset, FontAsset, StylesheetAsset, HtmlAsset, ScriptAsset", "find dependency with path %s\" % path) def __eq__(self, other): return self._path ==", "else: acyclic = True assets_unsorted.remove(asset) assets_sorted.append(asset) if not acyclic: raise RuntimeError('A cyclic dependency", "target_path = os.path.join(self._get_target_dir(), path_part) return target_path def __repr__(self): if self._lang is None: t", "self._data) target_file = os.path.join(temp_path, \"target.css\") proc = subprocess.Popen( [ \"java\", \"-Xss100m\", 
\"-jar\", self._settings.yuicompressor_file,", "dependency with path %s\" % path) def __eq__(self, other): return self._path == other._path", "def _get_source_dir(self): return self._settings.scripts.source def _get_target_dir(self): return self._settings.scripts.target def _get_target_path(self): return self.get_target_path( hash=get_file_hash(self._path,", "os.path.join(self._get_partials_dir(), path_part) else: target_path = os.path.join(self._get_target_dir(), path_part) return target_path def __repr__(self): if self._lang", "def add_dependency(self, path, lang=None): dependency = self._collection.find_asset(path, lang) if dependency: if dependency not", "'%s-%s' % (parts[0], opts['hash']) path_part = '%s%s' % (new_filename, parts[1]) if 'change_extension' in", "path) def __eq__(self, other): return self._path == other._path and self._lang == other._lang def", "target_path self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self)) else: cache_entry = CacheEntry(self._path, target_path, self._lang) self._tool_cache.add(cache_entry) print('Created {asset}'.format(asset=self))", "if dependency in assets_unsorted: break else: acyclic = True assets_unsorted.remove(asset) assets_sorted.append(asset) if not", "new_ext = opts['change_extension'] parts = os.path.splitext(path_part) path_part = '%s%s' % (parts[0], new_ext) if", "self._compile(target_path) if cache_entry: cache_entry.target = target_path self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self)) else: cache_entry = CacheEntry(self._path,", "if not os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path) class ImageAsset(BinaryAsset): def __init__(self, path, lang=None): super(ImageAsset,", "os.path.splitext(path_part) path_part = '%s%s' % (parts[0], new_ext) if 'lang' in opts and not(opts['lang']", "self._assets: if asset._path == path and asset._lang == lang: return asset return None", "in 
assets_unsorted: break else: acyclic = True assets_unsorted.remove(asset) assets_sorted.append(asset) if not acyclic: raise", "done.') class DependencyResolver(object): @staticmethod def topological_sort(assets_unsorted): assets_sorted = [] while len(assets_unsorted) > 0:", "if cache_entry: cache_entry.target = target_path self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self)) else: cache_entry = CacheEntry(self._path, target_path,", "def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.js\") save_file(source_file, self._data) target_file =", "'.coffee': if self._settings.verbose: print('Using CoffeeScript Compiler for {asset}'.format(asset=self)) self.compile_coffee() if self._settings.minify and not", "True assets_unsorted.remove(asset) assets_sorted.append(asset) if not acyclic: raise RuntimeError('A cyclic dependency occurred') return assets_sorted", "other._path and self._lang == other._lang def __ne__(self, other): return self._path != other._path and", "TextAsset(Asset): def __init__(self, path, lang=None): super(TextAsset, self).__init__(Asset.FILE, path, lang) self._data = None split", "hash=get_file_hash(self._path, unique=True), change_extension='.js' ) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ scripts.IncludeExpression, scripts.ScriptUrlExpression,", "if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class BinaryAsset(Asset): def __init__(self, path, lang=None): super(BinaryAsset,", "print(asset) print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets = DependencyResolver.topological_sort(self._assets) if self._settings.verbose: print('Build order:\\n{collection}\\n'.format( collection=self._assets)) def", "self._processor.compile(self._settings, target_path) if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: 
print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path)", "self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class ScriptAsset(TextAsset): @staticmethod def supported_extensions(): return", "self._settings.yuicompressor_file, \"--type\", \"js\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err =", "ImageAsset(BinaryAsset): def __init__(self, path, lang=None): super(ImageAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.png',", "], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self,", "not os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path) class ImageAsset(BinaryAsset): def __init__(self, path, lang=None): super(ImageAsset, self).__init__(path,", "{asset}'.format(asset=self)) self.compile_coffee() if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path)", "assets_unsorted: for dependency in asset._dependencies: if dependency in assets_unsorted: break else: acyclic =", "\"js\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err = proc.communicate() self._data", "]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.js\") save_file(source_file, self._data)", "self.load() self._processor = ExpressionProcessor(self, [ html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression", "['.css', '.scss'] @staticmethod def 
get_languages(settings): return settings.stylesheets.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'stylesheets') def", "stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def compile_coffee(self):", "scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path,", "not in self._dependencies: self._dependencies.append(dependency) else: print(\"Couldn't find dependency with path %s\" % path)", "self._data = f.read() def save(self, path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path, self._data) class", "else: print(\"String asset\") class TextAsset(Asset): def __init__(self, path, lang=None): super(TextAsset, self).__init__(Asset.FILE, path, lang)", ") out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings,", "in self._assets: if asset._path == path and asset._lang == lang: return asset return", "self._lang is None: t = '{path}' else: t = '{path} ({lang})' common_prefix =", "settings): asset_classes = [ ImageAsset, FontAsset, StylesheetAsset, HtmlAsset, ScriptAsset ] file_ext = os.path.splitext(path)[1]", "= self self._assets[-1]._settings = settings def find_asset(self, path, lang): for asset in self._assets:", "CacheEntry(self._path, target_path, self._lang) self._tool_cache.add(cache_entry) print('Created {asset}'.format(asset=self)) self._flag_modified = True else: if self._settings.verbose: print('Cached", "None: return asset_class(path, None) else: return [asset_class(path, lang) for lang in langs] return", "def __repr__(self): if self._lang is None: t = '{path}' else: t = '{path}", "self._lang) self._tool_cache.add(cache_entry) print('Created 
{asset}'.format(asset=self)) self._flag_modified = True else: if self._settings.verbose: print('Cached {asset}'.format(asset=self)) else:", "\"source.coffee\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"source.js\") proc = subprocess.Popen( [ self._settings.coffee_bin, \"-c\",", "'images') def _get_source_dir(self): return self._settings.images.source def _get_target_dir(self): return self._settings.images.target class FontAsset(BinaryAsset): def __init__(self,", "in self._dependencies: if dep_asset._flag_modified: return True return False def compile(self, force=False): if self._resource_type", "common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) return t.format(path=self._path[len(common_prefix) + 1:], lang=self._lang) def add_dependency(self, path,", "self._assets.append(asset) self._assets[-1]._collection = self self._assets[-1]._settings = settings else: if res is None: continue", "file_modified or force: if cache_entry: if os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path = self._get_target_path() self._compile(target_path) if", "os.path.join(temp_path, \"target.css\") proc = subprocess.Popen( [ \"java\", \"-Xss100m\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"css\", \"-o\",", "__init__(self, path, lang=None): super(BinaryAsset, self).__init__(Asset.FILE, path, lang) def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def", "\"-c\", source_file ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path)", "for asset in assets_unsorted: for dependency in asset._dependencies: if dependency in assets_unsorted: break", "t.format(path=self._path[len(common_prefix) + 1:], lang=self._lang) def add_dependency(self, path, lang=None): dependency = self._collection.find_asset(path, lang) if", "= settings def find_asset(self, path, lang): for 
asset in self._assets: if asset._path ==", "supported_extensions(): return ['.eot', '.svg', '.ttf', '.woff'] @staticmethod def get_languages(settings): return settings.fonts.languages def _get_partials_dir(self):", "= [] self._tool_cache = Cache() self._flag_modified = False def is_partial(self, path): return os.path.basename(path).startswith(\"_\")", "self._settings.scripts.source def _get_target_dir(self): return self._settings.scripts.target def _get_target_path(self): return self.get_target_path( hash=get_file_hash(self._path, unique=True), change_extension='.js' )", "in file_list: res = get_asset_objects(path, settings) if type(res) is list: for asset in", "out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def compile_coffee(self): temp_path = tempfile.mkdtemp()", "{asset}'.format(asset=self)) self.minify() self.save(target_path) class ScriptAsset(TextAsset): @staticmethod def supported_extensions(): return ['.js', '.coffee'] @staticmethod def", "html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path", "target_path) if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class", "lang=None): super(FontAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.eot', '.svg', '.ttf', '.woff'] @staticmethod", "self._assets.append(res) self._assets[-1]._collection = self self._assets[-1]._settings = settings def find_asset(self, path, lang): for asset", "assets_unsorted.remove(asset) assets_sorted.append(asset) if not acyclic: raise RuntimeError('A cyclic dependency occurred') return assets_sorted class", "new_ext) if 'lang' in opts and not(opts['lang'] is None): lang = 
opts['lang'] parts", "settings.html.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'html') def _get_source_dir(self): return self._settings.html.source def _get_target_dir(self): return", "os.path.join(temp_path, \"target.html\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.htmlcompressor_file, \"--type\", \"html\", \"--mask\", \"*.html\",", "self self._assets[-1]._settings = settings def find_asset(self, path, lang): for asset in self._assets: if", "self._extension = split[1] def load(self): with io.open(self._path, 'r', encoding='utf-8') as f: self._data =", "os.path.join(self._get_target_dir(), path_part) return target_path def __repr__(self): if self._lang is None: t = '{path}'", "dep_asset._flag_modified: return True return False def compile(self, force=False): if self._resource_type == Asset.FILE: cache_entry", "\"--mask\", \"*.html\", \"-o\", target_file, source_file, \"--remove-intertag-spaces\" ], stdout=subprocess.PIPE, stdin=subprocess.PIPE ) out, err =", "['.eot', '.svg', '.ttf', '.woff'] @staticmethod def get_languages(settings): return settings.fonts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials,", "cache_entry.target = target_path self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self)) else: cache_entry = CacheEntry(self._path, target_path, self._lang) self._tool_cache.add(cache_entry)", "= tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.css\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.css\") proc", "'.woff'] @staticmethod def get_languages(settings): return settings.fonts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'fonts') def _get_source_dir(self):", "_get_partials_dir(self): return os.path.join(self._settings.partials, 'html') def _get_source_dir(self): return self._settings.html.source def _get_target_dir(self): return 
self._settings.html.target def", "temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.js\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.js\")", "pick_dependencies(self): print('Found {count:d} assets'.format(count=len(self._assets))) if self._settings.verbose: print(\"Picking dependencies...\") for asset in self._assets: asset.parse()", "proc = subprocess.Popen( [ \"java\", \"-Xss100m\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"css\", \"-o\", target_file, source_file", "os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path, self._data) class StylesheetAsset(TextAsset): @staticmethod def supported_extensions(): return ['.css', '.scss'] @staticmethod", "os from .cache import Cache from .models import CacheEntry from .utils import get_file_hash,", "asset_classes: if file_ext in asset_class.supported_extensions(): langs = asset_class.get_languages(settings) if langs is None: return", "shutil import io from .compiler import ExpressionProcessor from .expressions import stylesheets, scripts, html", "% path) def __eq__(self, other): return self._path == other._path and self._lang == other._lang", "def __init__(self, path, lang=None): super(TextAsset, self).__init__(Asset.FILE, path, lang) self._data = None split =", "\"css\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err = proc.communicate() self._data", "'.scss'] @staticmethod def get_languages(settings): return settings.stylesheets.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'stylesheets') def _get_source_dir(self):", "in self._assets: asset.compile(force=self._settings.force) print('Build done.') class DependencyResolver(object): @staticmethod def topological_sort(assets_unsorted): assets_sorted = []", "settings): self._assets = [] self._settings = settings for path in file_list: res =", "super(ImageAsset, 
self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.png', '.jpg', '.gif'] @staticmethod def get_languages(settings):", "def __init__(self, file_list, settings): self._assets = [] self._settings = settings for path in", "self._settings.images.source def _get_target_dir(self): return self._settings.images.target class FontAsset(BinaryAsset): def __init__(self, path, lang=None): super(FontAsset, self).__init__(path,", "and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class HtmlAsset(TextAsset): @staticmethod def", "stylesheets.IncludeExpression, stylesheets.FontUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.css\")", "def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying", "__init__(self, path, lang=None): super(TextAsset, self).__init__(Asset.FILE, path, lang) self._data = None split = os.path.splitext(path)", "= False def is_partial(self, path): return os.path.basename(path).startswith(\"_\") def get_target_path(self, **opts): common_prefix = os.path.commonprefix([", "cache_entry = CacheEntry(self._path, target_path, self._lang) self._tool_cache.add(cache_entry) print('Created {asset}'.format(asset=self)) self._flag_modified = True else: if", "_get_target_dir(self): return self._settings.fonts.target def get_asset_objects(path, settings): asset_classes = [ ImageAsset, FontAsset, StylesheetAsset, HtmlAsset,", "is None): lang = opts['lang'] parts = os.path.splitext(path_part) path_part = '%s-%s%s' % (parts[0],", "@staticmethod def supported_extensions(): return ['.css', '.scss'] @staticmethod def get_languages(settings): return settings.stylesheets.languages def _get_partials_dir(self):", "\"--type\", \"html\", 
\"--mask\", \"*.html\", \"-o\", target_file, source_file, \"--remove-intertag-spaces\" ], stdout=subprocess.PIPE, stdin=subprocess.PIPE ) out,", "\"-o\", target_file, source_file, \"--remove-intertag-spaces\" ], stdout=subprocess.PIPE, stdin=subprocess.PIPE ) out, err = proc.communicate() self._data", "STRING = 1 def __init__(self, resource_type, path, lang): self._resource_type = resource_type self._path =", "split[1] def load(self): with io.open(self._path, 'r', encoding='utf-8') as f: self._data = f.read() def", "\"*.html\", \"-o\", target_file, source_file, \"--remove-intertag-spaces\" ], stdout=subprocess.PIPE, stdin=subprocess.PIPE ) out, err = proc.communicate()", "get_asset_objects(path, settings) if type(res) is list: for asset in res: self._assets.append(asset) self._assets[-1]._collection =", "\"source.js\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.js\") proc = subprocess.Popen( [ \"java\", \"-jar\",", "else cache_entry.file_modified() or self.dependencies_modified() if file_modified or force: if cache_entry: if os.path.exists(cache_entry.target): os.remove(cache_entry.target)", "[ self._settings.coffee_bin, \"-c\", source_file ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() self._data =", "\"-jar\", self._settings.htmlcompressor_file, \"--type\", \"html\", \"--mask\", \"*.html\", \"-o\", target_file, source_file, \"--remove-intertag-spaces\" ], stdout=subprocess.PIPE, stdin=subprocess.PIPE", "ExpressionProcessor(self, [ scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp()", "_get_target_dir(self): return self._settings.stylesheets.target def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): self.load() self._processor =", "supported_extensions(): return ['.png', '.jpg', '.gif'] 
@staticmethod def get_languages(settings): return settings.images.languages def _get_partials_dir(self): return", "other._lang def parse(self): self._parse() def dependencies_modified(self): for dep_asset in self._dependencies: if dep_asset._flag_modified: return", "def parse(self): self._parse() def dependencies_modified(self): for dep_asset in self._dependencies: if dep_asset._flag_modified: return True", "self._data) target_file = os.path.join(temp_path, \"target.html\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.htmlcompressor_file, \"--type\",", "res = get_asset_objects(path, settings) if type(res) is list: for asset in res: self._assets.append(asset)", "self._settings.stylesheets.target def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [", "self self._assets[-1]._settings = settings else: if res is None: continue self._assets.append(res) self._assets[-1]._collection =", "split[0] self._extension = split[1] def load(self): with io.open(self._path, 'r', encoding='utf-8') as f: self._data", "[ ImageAsset, FontAsset, StylesheetAsset, HtmlAsset, ScriptAsset ] file_ext = os.path.splitext(path)[1] for asset_class in", "% (parts[0], new_ext) if 'lang' in opts and not(opts['lang'] is None): lang =", "__init__(self, resource_type, path, lang): self._resource_type = resource_type self._path = path self._lang = lang", "0 STRING = 1 def __init__(self, resource_type, path, lang): self._resource_type = resource_type self._path", "common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) path_part = self._path[len(common_prefix)+1:] if 'hash' in opts: parts", "self._path, self._get_source_dir()]) return t.format(path=self._path[len(common_prefix) + 1:], lang=self._lang) def add_dependency(self, path, lang=None): dependency =", "_parse(self): self.load() self._processor = ExpressionProcessor(self, [ 
scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression ]) self._processor.parse() def", "with io.open(self._path, 'r', encoding='utf-8') as f: self._data = f.read() def save(self, path): if", "@staticmethod def get_languages(settings): return settings.stylesheets.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'stylesheets') def _get_source_dir(self): return", "save(self, path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path, self._data) class StylesheetAsset(TextAsset): @staticmethod def supported_extensions():", "self._settings.stylesheets.source def _get_target_dir(self): return self._settings.stylesheets.target def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): self.load()", "in opts: parts = os.path.splitext(path_part) new_filename = '%s-%s' % (parts[0], opts['hash']) path_part =", "else: cache_entry = CacheEntry(self._path, target_path, self._lang) self._tool_cache.add(cache_entry) print('Created {asset}'.format(asset=self)) self._flag_modified = True else:", "and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class BinaryAsset(Asset): def __init__(self,", "path, lang=None): super(ImageAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.png', '.jpg', '.gif'] @staticmethod", "= load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._extension == '.coffee': if", "= settings else: if res is None: continue self._assets.append(res) self._assets[-1]._collection = self self._assets[-1]._settings", "def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): pass def _compile(self, 
target_path): if not", "self.get_target_path(lang=self._lang) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression,", "def _get_partials_dir(self): return os.path.join(self._settings.partials, 'stylesheets') def _get_source_dir(self): return self._settings.stylesheets.source def _get_target_dir(self): return self._settings.stylesheets.target", "return self._path == other._path and self._lang == other._lang def __ne__(self, other): return self._path", "RuntimeError('A cyclic dependency occurred') return assets_sorted class Asset(object): FILE = 0 STRING =", "t = '{path} ({lang})' common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) return t.format(path=self._path[len(common_prefix) + 1:],", "supported_extensions(): return ['.css', '.scss'] @staticmethod def get_languages(settings): return settings.stylesheets.languages def _get_partials_dir(self): return os.path.join(self._settings.partials,", "= self self._assets[-1]._settings = settings else: if res is None: continue self._assets.append(res) self._assets[-1]._collection", "os.remove(cache_entry.target) target_path = self._get_target_path() self._compile(target_path) if cache_entry: cache_entry.target = target_path self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self))", "Cache() self._flag_modified = False def is_partial(self, path): return os.path.basename(path).startswith(\"_\") def get_target_path(self, **opts): common_prefix", ".compiler import ExpressionProcessor from .expressions import stylesheets, scripts, html import subprocess import tempfile", "or self.dependencies_modified() if file_modified or force: if cache_entry: if os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path =", "self._tool_cache.add(cache_entry) print('Created {asset}'.format(asset=self)) self._flag_modified = True 
else: if self._settings.verbose: print('Cached {asset}'.format(asset=self)) else: print(\"String", "class TextAsset(Asset): def __init__(self, path, lang=None): super(TextAsset, self).__init__(Asset.FILE, path, lang) self._data = None", "lang=None): super(ImageAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.png', '.jpg', '.gif'] @staticmethod def", "occurred') return assets_sorted class Asset(object): FILE = 0 STRING = 1 def __init__(self,", "= os.path.join(temp_path, \"target.css\") proc = subprocess.Popen( [ \"java\", \"-Xss100m\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"css\",", "'change_extension' in opts: new_ext = opts['change_extension'] parts = os.path.splitext(path_part) path_part = '%s%s' %", "self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class HtmlAsset(TextAsset): @staticmethod def supported_extensions(): return ['.html'] @staticmethod", "target_file = os.path.join(temp_path, \"target.html\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.htmlcompressor_file, \"--type\", \"html\",", "self._settings.yuicompressor_file, \"--type\", \"css\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err =", "return settings.fonts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'fonts') def _get_source_dir(self): return self._settings.fonts.source def _get_target_dir(self):", "= tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.coffee\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"source.js\") proc", "return self._settings.scripts.target def _get_target_path(self): return self.get_target_path( hash=get_file_hash(self._path, unique=True), change_extension='.js' ) def _parse(self): self.load()", "self._get_source_dir()]) path_part = self._path[len(common_prefix)+1:] if 'hash' in opts: parts = 
os.path.splitext(path_part) new_filename =", "settings.fonts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'fonts') def _get_source_dir(self): return self._settings.fonts.source def _get_target_dir(self): return", "def __init__(self, path, lang=None): super(FontAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.eot', '.svg',", "_parse(self): pass def _compile(self, target_path): if not os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path) class ImageAsset(BinaryAsset):", "print('Build order:\\n{collection}\\n'.format( collection=self._assets)) def build(self): print('Building assets...') for asset in self._assets: asset.compile(force=self._settings.force) print('Build", "compile(self, force=False): if self._resource_type == Asset.FILE: cache_entry = self._tool_cache.find_entry(self._path, self._lang) file_modified = True", "else: t = '{path} ({lang})' common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) return t.format(path=self._path[len(common_prefix) +", "scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file =", "proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def compile_coffee(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path,", "def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._extension == '.coffee': if self._settings.verbose: print('Using CoffeeScript", "split = os.path.splitext(path) self._basename = split[0] self._extension = split[1] def load(self): with io.open(self._path,", "and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class 
ScriptAsset(TextAsset): @staticmethod def", "self._assets[-1]._collection = self self._assets[-1]._settings = settings def find_asset(self, path, lang): for asset in", "while len(assets_unsorted) > 0: acyclic = False for asset in assets_unsorted: for dependency", "\"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err = proc.communicate() self._data =", "CoffeeScript Compiler for {asset}'.format(asset=self)) self.compile_coffee() if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying", "print('Build done.') class DependencyResolver(object): @staticmethod def topological_sort(assets_unsorted): assets_sorted = [] while len(assets_unsorted) >", "f: self._data = f.read() def save(self, path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path, self._data)", "StylesheetAsset, HtmlAsset, ScriptAsset ] file_ext = os.path.splitext(path)[1] for asset_class in asset_classes: if file_ext", "if res is None: continue self._assets.append(res) self._assets[-1]._collection = self self._assets[-1]._settings = settings def", "source_file, \"--remove-intertag-spaces\" ], stdout=subprocess.PIPE, stdin=subprocess.PIPE ) out, err = proc.communicate() self._data = load_file(target_file)", "unique=True), change_extension='.js' ) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression,", "supported_extensions(): return ['.html'] @staticmethod def get_languages(settings): return settings.html.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'html')", "= '{path} ({lang})' common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) return t.format(path=self._path[len(common_prefix) + 1:], lang=self._lang)", "[ \"java\", \"-Xss100m\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"css\", \"-o\", 
target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE,", "target_path): if not os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path) class ImageAsset(BinaryAsset): def __init__(self, path, lang=None):", "@staticmethod def get_languages(settings): return settings.images.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'images') def _get_source_dir(self): return", "add_dependency(self, path, lang=None): dependency = self._collection.find_asset(path, lang) if dependency: if dependency not in", "\"target.html\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.htmlcompressor_file, \"--type\", \"html\", \"--mask\", \"*.html\", \"-o\",", "self).__init__(Asset.FILE, path, lang) def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): pass def _compile(self,", "[] self._settings = settings for path in file_list: res = get_asset_objects(path, settings) if", "def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression ]) self._processor.parse() def", "class HtmlAsset(TextAsset): @staticmethod def supported_extensions(): return ['.html'] @staticmethod def get_languages(settings): return settings.html.languages def", "self).__init__(Asset.FILE, path, lang) self._data = None split = os.path.splitext(path) self._basename = split[0] self._extension", "return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): pass def _compile(self, target_path): if not os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path))", "= [] while len(assets_unsorted) > 0: acyclic = False for asset in assets_unsorted:", "_get_target_dir(self): return self._settings.scripts.target def _get_target_path(self): return self.get_target_path( 
hash=get_file_hash(self._path, unique=True), change_extension='.js' ) def _parse(self):", "target_path = os.path.join(self._get_partials_dir(), path_part) else: target_path = os.path.join(self._get_target_dir(), path_part) return target_path def __repr__(self):", "parts[1]) if self.is_partial(path_part): target_path = os.path.join(self._get_partials_dir(), path_part) else: target_path = os.path.join(self._get_target_dir(), path_part) return", "force: if cache_entry: if os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path = self._get_target_path() self._compile(target_path) if cache_entry: cache_entry.target", "load_file import shutil import io from .compiler import ExpressionProcessor from .expressions import stylesheets,", "True if cache_entry is None\\ else cache_entry.file_modified() or self.dependencies_modified() if file_modified or force:", "@staticmethod def supported_extensions(): return ['.html'] @staticmethod def get_languages(settings): return settings.html.languages def _get_partials_dir(self): return", "temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.css\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.css\")", "= False for asset in assets_unsorted: for dependency in asset._dependencies: if dependency in", "return self.get_target_path( hash=get_file_hash(self._path, unique=True), change_extension='.js' ) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [", "% (parts[0], opts['hash']) path_part = '%s%s' % (new_filename, parts[1]) if 'change_extension' in opts:", "self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._extension == '.coffee':", "= subprocess.Popen( [ \"java\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"js\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE,", "def _get_source_dir(self): return 
self._settings.images.source def _get_target_dir(self): return self._settings.images.target class FontAsset(BinaryAsset): def __init__(self, path,", "asset in self._assets: if asset._path == path and asset._lang == lang: return asset", "return self._settings.fonts.source def _get_target_dir(self): return self._settings.fonts.target def get_asset_objects(path, settings): asset_classes = [ ImageAsset,", "save_file, load_file import shutil import io from .compiler import ExpressionProcessor from .expressions import", "file_ext = os.path.splitext(path)[1] for asset_class in asset_classes: if file_ext in asset_class.supported_extensions(): langs =", "os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path) class ImageAsset(BinaryAsset): def __init__(self, path, lang=None): super(ImageAsset, self).__init__(path, lang)", "self._settings = settings for path in file_list: res = get_asset_objects(path, settings) if type(res)", "def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.html\") save_file(source_file, self._data) target_file =", "['.js', '.coffee'] @staticmethod def get_languages(settings): return settings.scripts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'scripts') def", "return os.path.join(self._settings.partials, 'images') def _get_source_dir(self): return self._settings.images.source def _get_target_dir(self): return self._settings.images.target class FontAsset(BinaryAsset):", "path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path, self._data) class StylesheetAsset(TextAsset): @staticmethod def supported_extensions(): return", "= os.path.join(temp_path, \"source.css\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.css\") proc = subprocess.Popen( [", ".expressions import stylesheets, scripts, html import subprocess import 
tempfile class AssetCollection(object): def __init__(self,", "temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.html\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.html\")", "AssetCollection(object): def __init__(self, file_list, settings): self._assets = [] self._settings = settings for path", "(parts[0], opts['hash']) path_part = '%s%s' % (new_filename, parts[1]) if 'change_extension' in opts: new_ext", "self._dependencies: self._dependencies.append(dependency) else: print(\"Couldn't find dependency with path %s\" % path) def __eq__(self,", "else: if res is None: continue self._assets.append(res) self._assets[-1]._collection = self self._assets[-1]._settings = settings", "BinaryAsset(Asset): def __init__(self, path, lang=None): super(BinaryAsset, self).__init__(Asset.FILE, path, lang) def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path,", "source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path)", "_get_target_path(self): return self.get_target_path( hash=get_file_hash(self._path, unique=True), change_extension='.js' ) def _parse(self): self.load() self._processor = ExpressionProcessor(self,", "path, lang) def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): pass def _compile(self, target_path):", "path_part = '%s%s' % (new_filename, parts[1]) if 'change_extension' in opts: new_ext = opts['change_extension']", "if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class ScriptAsset(TextAsset): @staticmethod def supported_extensions(): return ['.js',", "t = '{path}' else: t = '{path} ({lang})' common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()])", "dep_asset in self._dependencies: if dep_asset._flag_modified: return True return 
False def compile(self, force=False): if", "opts: new_ext = opts['change_extension'] parts = os.path.splitext(path_part) path_part = '%s%s' % (parts[0], new_ext)", "self._settings.coffee_bin, \"-c\", source_file ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() self._data = load_file(target_file)", "err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def compile_coffee(self): temp_path = tempfile.mkdtemp() source_file", "@staticmethod def supported_extensions(): return ['.eot', '.svg', '.ttf', '.woff'] @staticmethod def get_languages(settings): return settings.fonts.languages", "= None self._dependencies = [] self._tool_cache = Cache() self._flag_modified = False def is_partial(self,", "return self.get_target_path(lang=self._lang) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression,", "def get_languages(settings): return settings.fonts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'fonts') def _get_source_dir(self): return self._settings.fonts.source", "html.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.html\") save_file(source_file,", "def load(self): with io.open(self._path, 'r', encoding='utf-8') as f: self._data = f.read() def save(self,", "stdin=subprocess.PIPE, ) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path):", "get_languages(settings): return settings.images.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'images') def _get_source_dir(self): return self._settings.images.source def", "parse(self): self._parse() def dependencies_modified(self): for dep_asset in self._dependencies: if dep_asset._flag_modified: return True 
return", "def _get_partials_dir(self): return os.path.join(self._settings.partials, 'fonts') def _get_source_dir(self): return self._settings.fonts.source def _get_target_dir(self): return self._settings.fonts.target", "os.path.join(self._settings.partials, 'fonts') def _get_source_dir(self): return self._settings.fonts.source def _get_target_dir(self): return self._settings.fonts.target def get_asset_objects(path, settings):", "print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets = DependencyResolver.topological_sort(self._assets) if self._settings.verbose: print('Build order:\\n{collection}\\n'.format( collection=self._assets)) def build(self):", "False def compile(self, force=False): if self._resource_type == Asset.FILE: cache_entry = self._tool_cache.find_entry(self._path, self._lang) file_modified", "def get_languages(settings): return settings.scripts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'scripts') def _get_source_dir(self): return self._settings.scripts.source", "if cache_entry is None\\ else cache_entry.file_modified() or self.dependencies_modified() if file_modified or force: if", "self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.css\") save_file(source_file, self._data) target_file", "self._processor.compile(self._settings, target_path) if self._extension == '.coffee': if self._settings.verbose: print('Using CoffeeScript Compiler for {asset}'.format(asset=self))", "path, lang) self._data = None split = os.path.splitext(path) self._basename = split[0] self._extension =", "self._settings.htmlcompressor_file, \"--type\", \"html\", \"--mask\", \"*.html\", \"-o\", target_file, source_file, \"--remove-intertag-spaces\" ], stdout=subprocess.PIPE, stdin=subprocess.PIPE )", "os.path.splitext(path_part) new_filename = '%s-%s' % (parts[0], opts['hash']) path_part = '%s%s' % (new_filename, parts[1])", "return 
self._settings.stylesheets.target def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): self.load() self._processor = ExpressionProcessor(self,", "Compiler for {asset}'.format(asset=self)) self.compile_coffee() if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self))", "]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.css\") save_file(source_file, self._data)", "save_file(source_file, self._data) target_file = os.path.join(temp_path, \"source.js\") proc = subprocess.Popen( [ self._settings.coffee_bin, \"-c\", source_file", "os.path.join(self._settings.partials, 'stylesheets') def _get_source_dir(self): return self._settings.stylesheets.source def _get_target_dir(self): return self._settings.stylesheets.target def _get_target_path(self): return", "resource_type, path, lang): self._resource_type = resource_type self._path = path self._lang = lang self._collection", "os.path.commonprefix([ self._path, self._get_source_dir()]) return t.format(path=self._path[len(common_prefix) + 1:], lang=self._lang) def add_dependency(self, path, lang=None): dependency", "def compile_coffee(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.coffee\") save_file(source_file, self._data) target_file =", "def get_languages(settings): return settings.stylesheets.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'stylesheets') def _get_source_dir(self): return self._settings.stylesheets.source", "self._path, self._get_source_dir()]) path_part = self._path[len(common_prefix)+1:] if 'hash' in opts: parts = os.path.splitext(path_part) new_filename", "self._data = load_file(target_file) shutil.rmtree(temp_path) def compile_coffee(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, 
\"source.coffee\")", "return settings.scripts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'scripts') def _get_source_dir(self): return self._settings.scripts.source def _get_target_dir(self):", "if asset._path == path and asset._lang == lang: return asset return None def", "and asset._lang == lang: return asset return None def pick_dependencies(self): print('Found {count:d} assets'.format(count=len(self._assets)))", "self._data = None split = os.path.splitext(path) self._basename = split[0] self._extension = split[1] def", "_compile(self, target_path): self._processor.compile(self._settings, target_path) if self._extension == '.coffee': if self._settings.verbose: print('Using CoffeeScript Compiler", "= ExpressionProcessor(self, [ html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression ]) self._processor.parse()", "__repr__(self): if self._lang is None: t = '{path}' else: t = '{path} ({lang})'", "Asset.FILE: cache_entry = self._tool_cache.find_entry(self._path, self._lang) file_modified = True if cache_entry is None\\ else", "path, lang=None): super(TextAsset, self).__init__(Asset.FILE, path, lang) self._data = None split = os.path.splitext(path) self._basename", "self._dependencies: if dep_asset._flag_modified: return True return False def compile(self, force=False): if self._resource_type ==", "subprocess import tempfile class AssetCollection(object): def __init__(self, file_list, settings): self._assets = [] self._settings", "self._data) target_file = os.path.join(temp_path, \"target.js\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.yuicompressor_file, \"--type\",", "= os.path.join(self._get_target_dir(), path_part) return target_path def __repr__(self): if self._lang is None: t =", "def _compile(self, target_path): if not 
os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path) class ImageAsset(BinaryAsset): def __init__(self,", "shutil.copy(self._path, target_path) class ImageAsset(BinaryAsset): def __init__(self, path, lang=None): super(ImageAsset, self).__init__(path, lang) @staticmethod def", "find_asset(self, path, lang): for asset in self._assets: if asset._path == path and asset._lang", "file_modified = True if cache_entry is None\\ else cache_entry.file_modified() or self.dependencies_modified() if file_modified", "if self._settings.verbose: print('Using CoffeeScript Compiler for {asset}'.format(asset=self)) self.compile_coffee() if self._settings.minify and not self.is_partial(target_path):", "= load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._settings.minify and not self.is_partial(target_path):", "source_file = os.path.join(temp_path, \"source.css\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.css\") proc = subprocess.Popen(", "self._dependencies = [] self._tool_cache = Cache() self._flag_modified = False def is_partial(self, path): return", "self._assets[-1]._settings = settings else: if res is None: continue self._assets.append(res) self._assets[-1]._collection = self", "parts[1]) if 'change_extension' in opts: new_ext = opts['change_extension'] parts = os.path.splitext(path_part) path_part =", "tempfile class AssetCollection(object): def __init__(self, file_list, settings): self._assets = [] self._settings = settings", "not(opts['lang'] is None): lang = opts['lang'] parts = os.path.splitext(path_part) path_part = '%s-%s%s' %", "in assets_unsorted: for dependency in asset._dependencies: if dependency in assets_unsorted: break else: acyclic", "return ['.html'] @staticmethod def get_languages(settings): return settings.html.languages def _get_partials_dir(self): return 
os.path.join(self._settings.partials, 'html') def", "file_list: res = get_asset_objects(path, settings) if type(res) is list: for asset in res:", "path %s\" % path) def __eq__(self, other): return self._path == other._path and self._lang", "def supported_extensions(): return ['.png', '.jpg', '.gif'] @staticmethod def get_languages(settings): return settings.images.languages def _get_partials_dir(self):", "path, lang): self._resource_type = resource_type self._path = path self._lang = lang self._collection =", "= proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._settings.minify", "_get_target_dir(self): return self._settings.images.target class FontAsset(BinaryAsset): def __init__(self, path, lang=None): super(FontAsset, self).__init__(path, lang) @staticmethod", "asset in res: self._assets.append(asset) self._assets[-1]._collection = self self._assets[-1]._settings = settings else: if res", "for asset in self._assets: if asset._path == path and asset._lang == lang: return", "], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def", "in asset_class.supported_extensions(): langs = asset_class.get_languages(settings) if langs is None: return asset_class(path, None) else:", "order:\\n{collection}\\n'.format( collection=self._assets)) def build(self): print('Building assets...') for asset in self._assets: asset.compile(force=self._settings.force) print('Build done.')", "'hash' in opts: parts = os.path.splitext(path_part) new_filename = '%s-%s' % (parts[0], opts['hash']) path_part", "file_list, settings): self._assets = [] self._settings = settings for path in file_list: res", "dependency in assets_unsorted: break else: acyclic = True assets_unsorted.remove(asset) assets_sorted.append(asset) if not acyclic:", "path_part = self._path[len(common_prefix)+1:] if 
'hash' in opts: parts = os.path.splitext(path_part) new_filename = '%s-%s'", "stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self,", "unique=True)) def _parse(self): pass def _compile(self, target_path): if not os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path)", "self._lang) file_modified = True if cache_entry is None\\ else cache_entry.file_modified() or self.dependencies_modified() if", "return self._path != other._path and self._lang != other._lang def parse(self): self._parse() def dependencies_modified(self):", "], stdout=subprocess.PIPE, stdin=subprocess.PIPE ) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def", "from .models import CacheEntry from .utils import get_file_hash, save_file, load_file import shutil import", "DependencyResolver(object): @staticmethod def topological_sort(assets_unsorted): assets_sorted = [] while len(assets_unsorted) > 0: acyclic =", "proc = subprocess.Popen( [ self._settings.coffee_bin, \"-c\", source_file ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err =", "self._assets: asset.parse() if self._settings.verbose: print(asset) print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets = DependencyResolver.topological_sort(self._assets) if self._settings.verbose:", "def topological_sort(assets_unsorted): assets_sorted = [] while len(assets_unsorted) > 0: acyclic = False for", "is_partial(self, path): return os.path.basename(path).startswith(\"_\") def get_target_path(self, **opts): common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) path_part", "= os.path.commonprefix([ self._path, self._get_source_dir()]) path_part = self._path[len(common_prefix)+1:] if 'hash' in opts: parts =", "io.open(self._path, 'r', encoding='utf-8') as f: self._data 
= f.read() def save(self, path): if not", "\"java\", \"-jar\", self._settings.htmlcompressor_file, \"--type\", \"html\", \"--mask\", \"*.html\", \"-o\", target_file, source_file, \"--remove-intertag-spaces\" ], stdout=subprocess.PIPE,", "out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path)", "{asset}'.format(asset=self)) self._flag_modified = True else: if self._settings.verbose: print('Cached {asset}'.format(asset=self)) else: print(\"String asset\") class", "'stylesheets') def _get_source_dir(self): return self._settings.stylesheets.source def _get_target_dir(self): return self._settings.stylesheets.target def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path,", "print(\"String asset\") class TextAsset(Asset): def __init__(self, path, lang=None): super(TextAsset, self).__init__(Asset.FILE, path, lang) self._data", "is None\\ else cache_entry.file_modified() or self.dependencies_modified() if file_modified or force: if cache_entry: if", "for dep_asset in self._dependencies: if dep_asset._flag_modified: return True return False def compile(self, force=False):", "= f.read() def save(self, path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path, self._data) class StylesheetAsset(TextAsset):", "= [ ImageAsset, FontAsset, StylesheetAsset, HtmlAsset, ScriptAsset ] file_ext = os.path.splitext(path)[1] for asset_class", "lang) self._data = None split = os.path.splitext(path) self._basename = split[0] self._extension = split[1]", "and self._lang == other._lang def __ne__(self, other): return self._path != other._path and self._lang", "force=False): if self._resource_type == Asset.FILE: cache_entry = self._tool_cache.find_entry(self._path, self._lang) file_modified = True if", "other._path and self._lang != other._lang def parse(self): self._parse() def 
dependencies_modified(self): for dep_asset in", "0: acyclic = False for asset in assets_unsorted: for dependency in asset._dependencies: if", "f.read() def save(self, path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path, self._data) class StylesheetAsset(TextAsset): @staticmethod", "print('Building assets...') for asset in self._assets: asset.compile(force=self._settings.force) print('Build done.') class DependencyResolver(object): @staticmethod def", "dependencies...\") for asset in self._assets: asset.parse() if self._settings.verbose: print(asset) print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets", "self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._settings.minify and not", "= subprocess.Popen( [ \"java\", \"-jar\", self._settings.htmlcompressor_file, \"--type\", \"html\", \"--mask\", \"*.html\", \"-o\", target_file, source_file,", "io from .compiler import ExpressionProcessor from .expressions import stylesheets, scripts, html import subprocess", "def get_languages(settings): return settings.html.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'html') def _get_source_dir(self): return self._settings.html.source", "return ['.css', '.scss'] @staticmethod def get_languages(settings): return settings.stylesheets.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'stylesheets')", "proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._settings.minify and", "path_part) else: target_path = os.path.join(self._get_target_dir(), path_part) return target_path def __repr__(self): if self._lang is", "get_languages(settings): return settings.scripts.languages def _get_partials_dir(self): 
return os.path.join(self._settings.partials, 'scripts') def _get_source_dir(self): return self._settings.scripts.source def", "opts and not(opts['lang'] is None): lang = opts['lang'] parts = os.path.splitext(path_part) path_part =", "def _parse(self): pass def _compile(self, target_path): if not os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path) class", "'{path}' else: t = '{path} ({lang})' common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) return t.format(path=self._path[len(common_prefix)", "asset in assets_unsorted: for dependency in asset._dependencies: if dependency in assets_unsorted: break else:", ".cache import Cache from .models import CacheEntry from .utils import get_file_hash, save_file, load_file", "self._lang != other._lang def parse(self): self._parse() def dependencies_modified(self): for dep_asset in self._dependencies: if", "\"--type\", \"css\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err = proc.communicate()", "['.png', '.jpg', '.gif'] @staticmethod def get_languages(settings): return settings.images.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'images')", "if self._settings.verbose: print(\"Picking dependencies...\") for asset in self._assets: asset.parse() if self._settings.verbose: print(asset) print('Dependencies", "lang) if dependency: if dependency not in self._dependencies: self._dependencies.append(dependency) else: print(\"Couldn't find dependency", "stylesheets, scripts, html import subprocess import tempfile class AssetCollection(object): def __init__(self, file_list, settings):", "\"target.js\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"js\", \"-o\", target_file, source_file", "@staticmethod def topological_sort(assets_unsorted): assets_sorted = [] while len(assets_unsorted) > 0: acyclic = False", 
"lang=None): super(TextAsset, self).__init__(Asset.FILE, path, lang) self._data = None split = os.path.splitext(path) self._basename =", "[ \"java\", \"-jar\", self._settings.htmlcompressor_file, \"--type\", \"html\", \"--mask\", \"*.html\", \"-o\", target_file, source_file, \"--remove-intertag-spaces\" ],", "os.path.join(self._settings.partials, 'scripts') def _get_source_dir(self): return self._settings.scripts.source def _get_target_dir(self): return self._settings.scripts.target def _get_target_path(self): return", "= self._tool_cache.find_entry(self._path, self._lang) file_modified = True if cache_entry is None\\ else cache_entry.file_modified() or", "= self._collection.find_asset(path, lang) if dependency: if dependency not in self._dependencies: self._dependencies.append(dependency) else: print(\"Couldn't", "print(\"Couldn't find dependency with path %s\" % path) def __eq__(self, other): return self._path", "build(self): print('Building assets...') for asset in self._assets: asset.compile(force=self._settings.force) print('Build done.') class DependencyResolver(object): @staticmethod", "self._path != other._path and self._lang != other._lang def parse(self): self._parse() def dependencies_modified(self): for", "self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.html\") save_file(source_file, self._data) target_file", "stdin=subprocess.PIPE ) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path):", "self._lang == other._lang def __ne__(self, other): return self._path != other._path and self._lang !=", "@staticmethod def supported_extensions(): return ['.png', '.jpg', '.gif'] @staticmethod def get_languages(settings): return settings.images.languages def", "scripts.AppConfExpression, scripts.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, 
\"source.js\")", "= os.path.join(temp_path, \"source.html\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.html\") proc = subprocess.Popen( [", "return ['.png', '.jpg', '.gif'] @staticmethod def get_languages(settings): return settings.images.languages def _get_partials_dir(self): return os.path.join(self._settings.partials,", "= split[0] self._extension = split[1] def load(self): with io.open(self._path, 'r', encoding='utf-8') as f:", "get_file_hash, save_file, load_file import shutil import io from .compiler import ExpressionProcessor from .expressions", "DependencyResolver.topological_sort(self._assets) if self._settings.verbose: print('Build order:\\n{collection}\\n'.format( collection=self._assets)) def build(self): print('Building assets...') for asset in", "return settings.images.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'images') def _get_source_dir(self): return self._settings.images.source def _get_target_dir(self):", "is None: return asset_class(path, None) else: return [asset_class(path, lang) for lang in langs]", "= '%s-%s%s' % (parts[0], lang, parts[1]) if self.is_partial(path_part): target_path = os.path.join(self._get_partials_dir(), path_part) else:", "lang) @staticmethod def supported_extensions(): return ['.eot', '.svg', '.ttf', '.woff'] @staticmethod def get_languages(settings): return", "asset return None def pick_dependencies(self): print('Found {count:d} assets'.format(count=len(self._assets))) if self._settings.verbose: print(\"Picking dependencies...\") for", "return assets_sorted class Asset(object): FILE = 0 STRING = 1 def __init__(self, resource_type,", "dependency: if dependency not in self._dependencies: self._dependencies.append(dependency) else: print(\"Couldn't find dependency with path", "cyclic dependency occurred') return assets_sorted class Asset(object): FILE = 0 STRING = 1", "settings def find_asset(self, path, lang): for asset in self._assets: if 
asset._path == path", "== other._lang def __ne__(self, other): return self._path != other._path and self._lang != other._lang", "= opts['lang'] parts = os.path.splitext(path_part) path_part = '%s-%s%s' % (parts[0], lang, parts[1]) if", "path, lang): for asset in self._assets: if asset._path == path and asset._lang ==", "temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.coffee\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"source.js\")", "**opts): common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) path_part = self._path[len(common_prefix)+1:] if 'hash' in opts:", "minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.html\") save_file(source_file, self._data) target_file = os.path.join(temp_path,", "= True if cache_entry is None\\ else cache_entry.file_modified() or self.dependencies_modified() if file_modified or", "{asset}'.format(asset=self)) else: print(\"String asset\") class TextAsset(Asset): def __init__(self, path, lang=None): super(TextAsset, self).__init__(Asset.FILE, path,", "asset.parse() if self._settings.verbose: print(asset) print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets = DependencyResolver.topological_sort(self._assets) if self._settings.verbose: print('Build", "self._settings.fonts.target def get_asset_objects(path, settings): asset_classes = [ ImageAsset, FontAsset, StylesheetAsset, HtmlAsset, ScriptAsset ]", "subprocess.Popen( [ self._settings.coffee_bin, \"-c\", source_file ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() self._data", "if dep_asset._flag_modified: return True return False def compile(self, force=False): if self._resource_type == Asset.FILE:", "os.path.join(self._settings.partials, 'html') def _get_source_dir(self): return self._settings.html.source def _get_target_dir(self): return self._settings.html.target def 
_get_target_path(self): return", "other): return self._path != other._path and self._lang != other._lang def parse(self): self._parse() def", "% (parts[0], lang, parts[1]) if self.is_partial(path_part): target_path = os.path.join(self._get_partials_dir(), path_part) else: target_path =", "other._lang def __ne__(self, other): return self._path != other._path and self._lang != other._lang def", "= None split = os.path.splitext(path) self._basename = split[0] self._extension = split[1] def load(self):", "def _get_target_dir(self): return self._settings.images.target class FontAsset(BinaryAsset): def __init__(self, path, lang=None): super(FontAsset, self).__init__(path, lang)", "@staticmethod def get_languages(settings): return settings.scripts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'scripts') def _get_source_dir(self): return", ") def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression ])", "target_path) class ImageAsset(BinaryAsset): def __init__(self, path, lang=None): super(ImageAsset, self).__init__(path, lang) @staticmethod def supported_extensions():", "assets_unsorted: break else: acyclic = True assets_unsorted.remove(asset) assets_sorted.append(asset) if not acyclic: raise RuntimeError('A", "({lang})' common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) return t.format(path=self._path[len(common_prefix) + 1:], lang=self._lang) def add_dependency(self,", "def compile(self, force=False): if self._resource_type == Asset.FILE: cache_entry = self._tool_cache.find_entry(self._path, self._lang) file_modified =", "lang self._collection = None self._settings = None self._dependencies = [] self._tool_cache = Cache()", "self.save(target_path) class BinaryAsset(Asset): def __init__(self, path, lang=None): super(BinaryAsset, self).__init__(Asset.FILE, path, lang) def 
_get_target_path(self):", "\"java\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"js\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out,", "self._flag_modified = True else: if self._settings.verbose: print('Cached {asset}'.format(asset=self)) else: print(\"String asset\") class TextAsset(Asset):", "def __eq__(self, other): return self._path == other._path and self._lang == other._lang def __ne__(self,", "new_filename = '%s-%s' % (parts[0], opts['hash']) path_part = '%s%s' % (new_filename, parts[1]) if", "class ScriptAsset(TextAsset): @staticmethod def supported_extensions(): return ['.js', '.coffee'] @staticmethod def get_languages(settings): return settings.scripts.languages", "def build(self): print('Building assets...') for asset in self._assets: asset.compile(force=self._settings.force) print('Build done.') class DependencyResolver(object):", "lang = opts['lang'] parts = os.path.splitext(path_part) path_part = '%s-%s%s' % (parts[0], lang, parts[1])", "return os.path.join(self._settings.partials, 'fonts') def _get_source_dir(self): return self._settings.fonts.source def _get_target_dir(self): return self._settings.fonts.target def get_asset_objects(path,", "is None: t = '{path}' else: t = '{path} ({lang})' common_prefix = os.path.commonprefix([", "os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path = self._get_target_path() self._compile(target_path) if cache_entry: cache_entry.target = target_path self._tool_cache.update(cache_entry) print('Updated", "if file_ext in asset_class.supported_extensions(): langs = asset_class.get_languages(settings) if langs is None: return asset_class(path,", "= ExpressionProcessor(self, [ scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path =", "list: for asset in res: self._assets.append(asset) self._assets[-1]._collection = self 
self._assets[-1]._settings = settings else:", "= CacheEntry(self._path, target_path, self._lang) self._tool_cache.add(cache_entry) print('Created {asset}'.format(asset=self)) self._flag_modified = True else: if self._settings.verbose:", "if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class ScriptAsset(TextAsset):", "= lang self._collection = None self._settings = None self._dependencies = [] self._tool_cache =", "res: self._assets.append(asset) self._assets[-1]._collection = self self._assets[-1]._settings = settings else: if res is None:", "return None def pick_dependencies(self): print('Found {count:d} assets'.format(count=len(self._assets))) if self._settings.verbose: print(\"Picking dependencies...\") for asset", "acyclic: raise RuntimeError('A cyclic dependency occurred') return assets_sorted class Asset(object): FILE = 0", "opts['lang'] parts = os.path.splitext(path_part) path_part = '%s-%s%s' % (parts[0], lang, parts[1]) if self.is_partial(path_part):", "target_path): self._processor.compile(self._settings, target_path) if self._extension == '.coffee': if self._settings.verbose: print('Using CoffeeScript Compiler for", "self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class HtmlAsset(TextAsset): @staticmethod", "lang) def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): pass def _compile(self, target_path): if", "_get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): pass def _compile(self, target_path): if not os.path.exists(os.path.dirname(target_path)):", "def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): self.load() 
self._processor = ExpressionProcessor(self, [ stylesheets.ImageUrlExpression,", "self._extension == '.coffee': if self._settings.verbose: print('Using CoffeeScript Compiler for {asset}'.format(asset=self)) self.compile_coffee() if self._settings.minify", "{dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets = DependencyResolver.topological_sort(self._assets) if self._settings.verbose: print('Build order:\\n{collection}\\n'.format( collection=self._assets)) def build(self): print('Building", "os.path.splitext(path_part) path_part = '%s-%s%s' % (parts[0], lang, parts[1]) if self.is_partial(path_part): target_path = os.path.join(self._get_partials_dir(),", "self._path[len(common_prefix)+1:] if 'hash' in opts: parts = os.path.splitext(path_part) new_filename = '%s-%s' % (parts[0],", "self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.js\") save_file(source_file, self._data) target_file", "len(assets_unsorted) > 0: acyclic = False for asset in assets_unsorted: for dependency in", "None: t = '{path}' else: t = '{path} ({lang})' common_prefix = os.path.commonprefix([ self._path,", "[ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file =", "with path %s\" % path) def __eq__(self, other): return self._path == other._path and", "'.coffee'] @staticmethod def get_languages(settings): return settings.scripts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'scripts') def _get_source_dir(self):", "def pick_dependencies(self): print('Found {count:d} assets'.format(count=len(self._assets))) if self._settings.verbose: print(\"Picking dependencies...\") for asset in self._assets:", "html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() 
source_file = os.path.join(temp_path,", "(new_filename, parts[1]) if 'change_extension' in opts: new_ext = opts['change_extension'] parts = os.path.splitext(path_part) path_part", "self._data) class StylesheetAsset(TextAsset): @staticmethod def supported_extensions(): return ['.css', '.scss'] @staticmethod def get_languages(settings): return", "return target_path def __repr__(self): if self._lang is None: t = '{path}' else: t", "= Cache() self._flag_modified = False def is_partial(self, path): return os.path.basename(path).startswith(\"_\") def get_target_path(self, **opts):", "path and asset._lang == lang: return asset return None def pick_dependencies(self): print('Found {count:d}", "class StylesheetAsset(TextAsset): @staticmethod def supported_extensions(): return ['.css', '.scss'] @staticmethod def get_languages(settings): return settings.stylesheets.languages", "in asset_classes: if file_ext in asset_class.supported_extensions(): langs = asset_class.get_languages(settings) if langs is None:", "if self.is_partial(path_part): target_path = os.path.join(self._get_partials_dir(), path_part) else: target_path = os.path.join(self._get_target_dir(), path_part) return target_path", "self._resource_type == Asset.FILE: cache_entry = self._tool_cache.find_entry(self._path, self._lang) file_modified = True if cache_entry is", "self._settings.verbose: print(asset) print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets = DependencyResolver.topological_sort(self._assets) if self._settings.verbose: print('Build order:\\n{collection}\\n'.format( collection=self._assets))", "os.path.basename(path).startswith(\"_\") def get_target_path(self, **opts): common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) path_part = self._path[len(common_prefix)+1:] if", "self._processor = ExpressionProcessor(self, [ scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression ]) 
self._processor.parse() def minify(self): temp_path", "load_file(target_file) shutil.rmtree(temp_path) def compile_coffee(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.coffee\") save_file(source_file, self._data)", "def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression,", "lang, parts[1]) if self.is_partial(path_part): target_path = os.path.join(self._get_partials_dir(), path_part) else: target_path = os.path.join(self._get_target_dir(), path_part)", "file_ext in asset_class.supported_extensions(): langs = asset_class.get_languages(settings) if langs is None: return asset_class(path, None)", "return os.path.join(self._settings.partials, 'scripts') def _get_source_dir(self): return self._settings.scripts.source def _get_target_dir(self): return self._settings.scripts.target def _get_target_path(self):", "lang): for asset in self._assets: if asset._path == path and asset._lang == lang:", "'.jpg', '.gif'] @staticmethod def get_languages(settings): return settings.images.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'images') def", "ImageAsset, FontAsset, StylesheetAsset, HtmlAsset, ScriptAsset ] file_ext = os.path.splitext(path)[1] for asset_class in asset_classes:", "self._tool_cache.find_entry(self._path, self._lang) file_modified = True if cache_entry is None\\ else cache_entry.file_modified() or self.dependencies_modified()", "self._data) target_file = os.path.join(temp_path, \"source.js\") proc = subprocess.Popen( [ self._settings.coffee_bin, \"-c\", source_file ],", "= '%s-%s' % (parts[0], opts['hash']) path_part = '%s%s' % (new_filename, parts[1]) if 'change_extension'", "dependency = self._collection.find_asset(path, lang) if dependency: if dependency not in self._dependencies: self._dependencies.append(dependency) 
else:", "return settings.stylesheets.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'stylesheets') def _get_source_dir(self): return self._settings.stylesheets.source def _get_target_dir(self):", "self._processor = ExpressionProcessor(self, [ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression ]) self._processor.parse() def minify(self): temp_path =", "not acyclic: raise RuntimeError('A cyclic dependency occurred') return assets_sorted class Asset(object): FILE =", "return os.path.join(self._settings.partials, 'html') def _get_source_dir(self): return self._settings.html.source def _get_target_dir(self): return self._settings.html.target def _get_target_path(self):", "if self._settings.verbose: print('Build order:\\n{collection}\\n'.format( collection=self._assets)) def build(self): print('Building assets...') for asset in self._assets:", "= True assets_unsorted.remove(asset) assets_sorted.append(asset) if not acyclic: raise RuntimeError('A cyclic dependency occurred') return", "= subprocess.Popen( [ self._settings.coffee_bin, \"-c\", source_file ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate()", "= '%s%s' % (new_filename, parts[1]) if 'change_extension' in opts: new_ext = opts['change_extension'] parts", "asset_classes = [ ImageAsset, FontAsset, StylesheetAsset, HtmlAsset, ScriptAsset ] file_ext = os.path.splitext(path)[1] for", "def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression ]) self._processor.parse()", "def _get_target_dir(self): return self._settings.stylesheets.target def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): self.load() self._processor", "self._settings.verbose: print('Using CoffeeScript Compiler for {asset}'.format(asset=self)) 
self.compile_coffee() if self._settings.minify and not self.is_partial(target_path): if", "return False def compile(self, force=False): if self._resource_type == Asset.FILE: cache_entry = self._tool_cache.find_entry(self._path, self._lang)", "def _get_target_path(self): return self.get_target_path( hash=get_file_hash(self._path, unique=True), change_extension='.js' ) def _parse(self): self.load() self._processor =", "tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.html\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.html\") proc =", "return self._settings.images.target class FontAsset(BinaryAsset): def __init__(self, path, lang=None): super(FontAsset, self).__init__(path, lang) @staticmethod def", "FontAsset(BinaryAsset): def __init__(self, path, lang=None): super(FontAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.eot',", "__init__(self, file_list, settings): self._assets = [] self._settings = settings for path in file_list:", "None: continue self._assets.append(res) self._assets[-1]._collection = self self._assets[-1]._settings = settings def find_asset(self, path, lang):", "return self._settings.html.source def _get_target_dir(self): return self._settings.html.target def _get_target_path(self): return self.get_target_path(lang=self._lang) def _parse(self): self.load()", "= tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.js\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.js\") proc", "== other._path and self._lang == other._lang def __ne__(self, other): return self._path != other._path", "parts = os.path.splitext(path_part) path_part = '%s-%s%s' % (parts[0], lang, parts[1]) if self.is_partial(path_part): target_path", "target_path def __repr__(self): if self._lang is None: t = '{path}' else: t =", "self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self)) else: cache_entry = 
CacheEntry(self._path, target_path, self._lang) self._tool_cache.add(cache_entry) print('Created {asset}'.format(asset=self)) self._flag_modified", "\"source.js\") proc = subprocess.Popen( [ self._settings.coffee_bin, \"-c\", source_file ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err", "self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class BinaryAsset(Asset): def __init__(self, path, lang=None):", "os.path.join(temp_path, \"source.html\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.html\") proc = subprocess.Popen( [ \"java\",", "self._collection.find_asset(path, lang) if dependency: if dependency not in self._dependencies: self._dependencies.append(dependency) else: print(\"Couldn't find", "self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.eot', '.svg', '.ttf', '.woff'] @staticmethod def get_languages(settings):", "= opts['change_extension'] parts = os.path.splitext(path_part) path_part = '%s%s' % (parts[0], new_ext) if 'lang'", "__init__(self, path, lang=None): super(FontAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.eot', '.svg', '.ttf',", "> 0: acyclic = False for asset in assets_unsorted: for dependency in asset._dependencies:", "= proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def compile_coffee(self): temp_path = tempfile.mkdtemp() source_file =", "\"html\", \"--mask\", \"*.html\", \"-o\", target_file, source_file, \"--remove-intertag-spaces\" ], stdout=subprocess.PIPE, stdin=subprocess.PIPE ) out, err", "self.load() self._processor = ExpressionProcessor(self, [ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression ]) self._processor.parse() def minify(self): temp_path", "def get_languages(settings): return settings.images.languages def _get_partials_dir(self): return 
os.path.join(self._settings.partials, 'images') def _get_source_dir(self): return self._settings.images.source", "for asset_class in asset_classes: if file_ext in asset_class.supported_extensions(): langs = asset_class.get_languages(settings) if langs", "asset._dependencies: if dependency in assets_unsorted: break else: acyclic = True assets_unsorted.remove(asset) assets_sorted.append(asset) if", "'%s%s' % (parts[0], new_ext) if 'lang' in opts and not(opts['lang'] is None): lang", "from .compiler import ExpressionProcessor from .expressions import stylesheets, scripts, html import subprocess import", "supported_extensions(): return ['.js', '.coffee'] @staticmethod def get_languages(settings): return settings.scripts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials,", "\"target.css\") proc = subprocess.Popen( [ \"java\", \"-Xss100m\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"css\", \"-o\", target_file,", "self._path = path self._lang = lang self._collection = None self._settings = None self._dependencies", "load(self): with io.open(self._path, 'r', encoding='utf-8') as f: self._data = f.read() def save(self, path):", "dependencies=asset._dependencies)) self._assets = DependencyResolver.topological_sort(self._assets) if self._settings.verbose: print('Build order:\\n{collection}\\n'.format( collection=self._assets)) def build(self): print('Building assets...')", "lang=self._lang) def add_dependency(self, path, lang=None): dependency = self._collection.find_asset(path, lang) if dependency: if dependency", "for asset in res: self._assets.append(asset) self._assets[-1]._collection = self self._assets[-1]._settings = settings else: if", "= os.path.join(temp_path, \"source.js\") proc = subprocess.Popen( [ self._settings.coffee_bin, \"-c\", source_file ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "self._settings.verbose: print('Cached {asset}'.format(asset=self)) else: print(\"String asset\") class 
TextAsset(Asset): def __init__(self, path, lang=None): super(TextAsset,", "self._settings.fonts.source def _get_target_dir(self): return self._settings.fonts.target def get_asset_objects(path, settings): asset_classes = [ ImageAsset, FontAsset,", "stylesheets.FontUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.css\") save_file(source_file,", "print('Found {count:d} assets'.format(count=len(self._assets))) if self._settings.verbose: print(\"Picking dependencies...\") for asset in self._assets: asset.parse() if", "self.get_target_path( hash=get_file_hash(self._path, unique=True), change_extension='.js' ) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ scripts.IncludeExpression,", "parts = os.path.splitext(path_part) new_filename = '%s-%s' % (parts[0], opts['hash']) path_part = '%s%s' %", "(parts[0], new_ext) if 'lang' in opts and not(opts['lang'] is None): lang = opts['lang']", "= self._get_target_path() self._compile(target_path) if cache_entry: cache_entry.target = target_path self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self)) else: cache_entry", "self.minify() self.save(target_path) class ScriptAsset(TextAsset): @staticmethod def supported_extensions(): return ['.js', '.coffee'] @staticmethod def get_languages(settings):", "return ['.js', '.coffee'] @staticmethod def get_languages(settings): return settings.scripts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'scripts')", "dependencies_modified(self): for dep_asset in self._dependencies: if dep_asset._flag_modified: return True return False def compile(self,", "asset_class in asset_classes: if file_ext in asset_class.supported_extensions(): langs = asset_class.get_languages(settings) if langs is", "'.gif'] @staticmethod def get_languages(settings): return settings.images.languages def _get_partials_dir(self): return 
os.path.join(self._settings.partials, 'images') def _get_source_dir(self):", "source_file = os.path.join(temp_path, \"source.coffee\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"source.js\") proc = subprocess.Popen(", "if type(res) is list: for asset in res: self._assets.append(asset) self._assets[-1]._collection = self self._assets[-1]._settings", "'%s-%s%s' % (parts[0], lang, parts[1]) if self.is_partial(path_part): target_path = os.path.join(self._get_partials_dir(), path_part) else: target_path", "minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.css\") save_file(source_file, self._data) target_file = os.path.join(temp_path,", "print('Updated {asset}'.format(asset=self)) else: cache_entry = CacheEntry(self._path, target_path, self._lang) self._tool_cache.add(cache_entry) print('Created {asset}'.format(asset=self)) self._flag_modified =", "None self._dependencies = [] self._tool_cache = Cache() self._flag_modified = False def is_partial(self, path):", "lang: return asset return None def pick_dependencies(self): print('Found {count:d} assets'.format(count=len(self._assets))) if self._settings.verbose: print(\"Picking", "in opts: new_ext = opts['change_extension'] parts = os.path.splitext(path_part) path_part = '%s%s' % (parts[0],", "= os.path.splitext(path_part) path_part = '%s%s' % (parts[0], new_ext) if 'lang' in opts and", "StylesheetAsset(TextAsset): @staticmethod def supported_extensions(): return ['.css', '.scss'] @staticmethod def get_languages(settings): return settings.stylesheets.languages def", "_get_partials_dir(self): return os.path.join(self._settings.partials, 'fonts') def _get_source_dir(self): return self._settings.fonts.source def _get_target_dir(self): return self._settings.fonts.target def", "print('Using CoffeeScript Compiler for {asset}'.format(asset=self)) self.compile_coffee() if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose:", 
"acyclic = True assets_unsorted.remove(asset) assets_sorted.append(asset) if not acyclic: raise RuntimeError('A cyclic dependency occurred')", "encoding='utf-8') as f: self._data = f.read() def save(self, path): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path))", "def get_asset_objects(path, settings): asset_classes = [ ImageAsset, FontAsset, StylesheetAsset, HtmlAsset, ScriptAsset ] file_ext", "proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"js\", \"-o\", target_file, source_file ],", "_get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression,", "'html') def _get_source_dir(self): return self._settings.html.source def _get_target_dir(self): return self._settings.html.target def _get_target_path(self): return self.get_target_path(lang=self._lang)", "resource_type self._path = path self._lang = lang self._collection = None self._settings = None", "def __init__(self, path, lang=None): super(BinaryAsset, self).__init__(Asset.FILE, path, lang) def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True))", "ScriptAsset ] file_ext = os.path.splitext(path)[1] for asset_class in asset_classes: if file_ext in asset_class.supported_extensions():", "cache_entry is None\\ else cache_entry.file_modified() or self.dependencies_modified() if file_modified or force: if cache_entry:", "print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class BinaryAsset(Asset): def __init__(self, path, lang=None): super(BinaryAsset, self).__init__(Asset.FILE, path,", "in self._assets: asset.parse() if self._settings.verbose: print(asset) print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets = 
DependencyResolver.topological_sort(self._assets) if", "self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class ScriptAsset(TextAsset): @staticmethod", "os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path) class ImageAsset(BinaryAsset): def __init__(self, path, lang=None): super(ImageAsset, self).__init__(path, lang) @staticmethod", "= os.path.splitext(path_part) new_filename = '%s-%s' % (parts[0], opts['hash']) path_part = '%s%s' % (new_filename,", "return self._settings.stylesheets.source def _get_target_dir(self): return self._settings.stylesheets.target def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self):", "os.path.splitext(path)[1] for asset_class in asset_classes: if file_ext in asset_class.supported_extensions(): langs = asset_class.get_languages(settings) if", "path, lang=None): super(BinaryAsset, self).__init__(Asset.FILE, path, lang) def _get_target_path(self): return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self):", "in opts and not(opts['lang'] is None): lang = opts['lang'] parts = os.path.splitext(path_part) path_part", "not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class ScriptAsset(TextAsset): @staticmethod def supported_extensions():", "= get_asset_objects(path, settings) if type(res) is list: for asset in res: self._assets.append(asset) self._assets[-1]._collection", "self._flag_modified = False def is_partial(self, path): return os.path.basename(path).startswith(\"_\") def get_target_path(self, **opts): common_prefix =", "os.path.join(temp_path, \"source.js\") proc = subprocess.Popen( [ self._settings.coffee_bin, \"-c\", source_file ], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out,", "def 
_get_partials_dir(self): return os.path.join(self._settings.partials, 'images') def _get_source_dir(self): return self._settings.images.source def _get_target_dir(self): return self._settings.images.target", "path_part = '%s%s' % (parts[0], new_ext) if 'lang' in opts and not(opts['lang'] is", "_get_partials_dir(self): return os.path.join(self._settings.partials, 'stylesheets') def _get_source_dir(self): return self._settings.stylesheets.source def _get_target_dir(self): return self._settings.stylesheets.target def", "if 'change_extension' in opts: new_ext = opts['change_extension'] parts = os.path.splitext(path_part) path_part = '%s%s'", "= os.path.splitext(path_part) path_part = '%s-%s%s' % (parts[0], lang, parts[1]) if self.is_partial(path_part): target_path =", "os.makedirs(os.path.dirname(path)) save_file(path, self._data) class StylesheetAsset(TextAsset): @staticmethod def supported_extensions(): return ['.css', '.scss'] @staticmethod def", "== path and asset._lang == lang: return asset return None def pick_dependencies(self): print('Found", "self.compile_coffee() if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class", "self.minify() self.save(target_path) class HtmlAsset(TextAsset): @staticmethod def supported_extensions(): return ['.html'] @staticmethod def get_languages(settings): return", "cache_entry: if os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path = self._get_target_path() self._compile(target_path) if cache_entry: cache_entry.target = target_path", "self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class BinaryAsset(Asset): def", "= os.path.join(temp_path, \"source.js\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.js\") proc = 
subprocess.Popen( [", "True else: if self._settings.verbose: print('Cached {asset}'.format(asset=self)) else: print(\"String asset\") class TextAsset(Asset): def __init__(self,", "FontAsset, StylesheetAsset, HtmlAsset, ScriptAsset ] file_ext = os.path.splitext(path)[1] for asset_class in asset_classes: if", "True return False def compile(self, force=False): if self._resource_type == Asset.FILE: cache_entry = self._tool_cache.find_entry(self._path,", "from .cache import Cache from .models import CacheEntry from .utils import get_file_hash, save_file,", "res is None: continue self._assets.append(res) self._assets[-1]._collection = self self._assets[-1]._settings = settings def find_asset(self,", "class ImageAsset(BinaryAsset): def __init__(self, path, lang=None): super(ImageAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return", "target_file, source_file, \"--remove-intertag-spaces\" ], stdout=subprocess.PIPE, stdin=subprocess.PIPE ) out, err = proc.communicate() self._data =", "target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err = proc.communicate() self._data = load_file(target_file)", "self.minify() self.save(target_path) class BinaryAsset(Asset): def __init__(self, path, lang=None): super(BinaryAsset, self).__init__(Asset.FILE, path, lang) def", "__init__(self, path, lang=None): super(ImageAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.png', '.jpg', '.gif']", "tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.js\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.js\") proc =", "import Cache from .models import CacheEntry from .utils import get_file_hash, save_file, load_file import", "== lang: return asset return None def pick_dependencies(self): print('Found {count:d} assets'.format(count=len(self._assets))) if self._settings.verbose:", "else: target_path = os.path.join(self._get_target_dir(), path_part) return 
target_path def __repr__(self): if self._lang is None:", "None): lang = opts['lang'] parts = os.path.splitext(path_part) path_part = '%s-%s%s' % (parts[0], lang,", "_parse(self): self.load() self._processor = ExpressionProcessor(self, [ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression ]) self._processor.parse() def minify(self):", "self._settings.verbose: print(\"Picking dependencies...\") for asset in self._assets: asset.parse() if self._settings.verbose: print(asset) print('Dependencies {dependencies}\\n'.format(", "pass def _compile(self, target_path): if not os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path, target_path) class ImageAsset(BinaryAsset): def", "for asset in self._assets: asset.parse() if self._settings.verbose: print(asset) print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets =", "target_path, self._lang) self._tool_cache.add(cache_entry) print('Created {asset}'.format(asset=self)) self._flag_modified = True else: if self._settings.verbose: print('Cached {asset}'.format(asset=self))", "path in file_list: res = get_asset_objects(path, settings) if type(res) is list: for asset", "None self._settings = None self._dependencies = [] self._tool_cache = Cache() self._flag_modified = False", "assets'.format(count=len(self._assets))) if self._settings.verbose: print(\"Picking dependencies...\") for asset in self._assets: asset.parse() if self._settings.verbose: print(asset)", "from .utils import get_file_hash, save_file, load_file import shutil import io from .compiler import", "self._assets[-1]._settings = settings def find_asset(self, path, lang): for asset in self._assets: if asset._path", "stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path):", "def _get_target_dir(self): return 
self._settings.scripts.target def _get_target_path(self): return self.get_target_path( hash=get_file_hash(self._path, unique=True), change_extension='.js' ) def", "target_path) if self._extension == '.coffee': if self._settings.verbose: print('Using CoffeeScript Compiler for {asset}'.format(asset=self)) self.compile_coffee()", "def _get_source_dir(self): return self._settings.fonts.source def _get_target_dir(self): return self._settings.fonts.target def get_asset_objects(path, settings): asset_classes =", "Asset(object): FILE = 0 STRING = 1 def __init__(self, resource_type, path, lang): self._resource_type", "scripts.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.js\") save_file(source_file,", "import ExpressionProcessor from .expressions import stylesheets, scripts, html import subprocess import tempfile class", "print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class HtmlAsset(TextAsset): @staticmethod def supported_extensions(): return ['.html'] @staticmethod def", "import shutil import io from .compiler import ExpressionProcessor from .expressions import stylesheets, scripts,", "source_file = os.path.join(temp_path, \"source.html\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.html\") proc = subprocess.Popen(", "= None self._settings = None self._dependencies = [] self._tool_cache = Cache() self._flag_modified =", "assets_sorted = [] while len(assets_unsorted) > 0: acyclic = False for asset in", "self._resource_type = resource_type self._path = path self._lang = lang self._collection = None self._settings", "html.I18nTemplateExpression, html.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.html\")", "stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression ]) 
self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path,", "not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class HtmlAsset(TextAsset): @staticmethod def supported_extensions():", "'{path} ({lang})' common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) return t.format(path=self._path[len(common_prefix) + 1:], lang=self._lang) def", "_get_target_path(self): return self.get_target_path(lang=self._lang) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression,", "class DependencyResolver(object): @staticmethod def topological_sort(assets_unsorted): assets_sorted = [] while len(assets_unsorted) > 0: acyclic", "= 0 STRING = 1 def __init__(self, resource_type, path, lang): self._resource_type = resource_type", "dependency not in self._dependencies: self._dependencies.append(dependency) else: print(\"Couldn't find dependency with path %s\" %", "_compile(self, target_path): self._processor.compile(self._settings, target_path) if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self))", "\"--type\", \"js\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err = proc.communicate()", "if cache_entry: if os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path = self._get_target_path() self._compile(target_path) if cache_entry: cache_entry.target =", "'.svg', '.ttf', '.woff'] @staticmethod def get_languages(settings): return settings.fonts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'fonts')", "_get_target_dir(self): return self._settings.html.target def _get_target_path(self): return self.get_target_path(lang=self._lang) def 
_parse(self): self.load() self._processor = ExpressionProcessor(self,", "def _get_target_dir(self): return self._settings.fonts.target def get_asset_objects(path, settings): asset_classes = [ ImageAsset, FontAsset, StylesheetAsset,", "acyclic = False for asset in assets_unsorted: for dependency in asset._dependencies: if dependency", "os.path.join(temp_path, \"source.coffee\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"source.js\") proc = subprocess.Popen( [ self._settings.coffee_bin,", "print(\"Picking dependencies...\") for asset in self._assets: asset.parse() if self._settings.verbose: print(asset) print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies))", "subprocess.Popen( [ \"java\", \"-jar\", self._settings.htmlcompressor_file, \"--type\", \"html\", \"--mask\", \"*.html\", \"-o\", target_file, source_file, \"--remove-intertag-spaces\"", "compile_coffee(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.coffee\") save_file(source_file, self._data) target_file = os.path.join(temp_path,", "'fonts') def _get_source_dir(self): return self._settings.fonts.source def _get_target_dir(self): return self._settings.fonts.target def get_asset_objects(path, settings): asset_classes", "in asset._dependencies: if dependency in assets_unsorted: break else: acyclic = True assets_unsorted.remove(asset) assets_sorted.append(asset)", "= [] self._settings = settings for path in file_list: res = get_asset_objects(path, settings)", "os.path.join(temp_path, \"source.css\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.css\") proc = subprocess.Popen( [ \"java\",", "target_path): self._processor.compile(self._settings, target_path) if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify()", "None\\ else cache_entry.file_modified() or self.dependencies_modified() if 
file_modified or force: if cache_entry: if os.path.exists(cache_entry.target):", "_parse(self): self.load() self._processor = ExpressionProcessor(self, [ html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression,", "return asset return None def pick_dependencies(self): print('Found {count:d} assets'.format(count=len(self._assets))) if self._settings.verbose: print(\"Picking dependencies...\")", "continue self._assets.append(res) self._assets[-1]._collection = self self._assets[-1]._settings = settings def find_asset(self, path, lang): for", "= os.path.join(temp_path, \"target.js\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"js\", \"-o\",", "if not acyclic: raise RuntimeError('A cyclic dependency occurred') return assets_sorted class Asset(object): FILE", "self.save(target_path) class HtmlAsset(TextAsset): @staticmethod def supported_extensions(): return ['.html'] @staticmethod def get_languages(settings): return settings.html.languages", "other): return self._path == other._path and self._lang == other._lang def __ne__(self, other): return", "if self._lang is None: t = '{path}' else: t = '{path} ({lang})' common_prefix", "return self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression", "self.dependencies_modified() if file_modified or force: if cache_entry: if os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path = self._get_target_path()", "self._assets: asset.compile(force=self._settings.force) print('Build done.') class DependencyResolver(object): @staticmethod def topological_sort(assets_unsorted): assets_sorted = [] while", "asset\") class TextAsset(Asset): def __init__(self, path, 
lang=None): super(TextAsset, self).__init__(Asset.FILE, path, lang) self._data =", "raise RuntimeError('A cyclic dependency occurred') return assets_sorted class Asset(object): FILE = 0 STRING", "if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class HtmlAsset(TextAsset): @staticmethod def supported_extensions(): return ['.html']", "langs is None: return asset_class(path, None) else: return [asset_class(path, lang) for lang in", "unique=True)) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [ stylesheets.ImageUrlExpression, stylesheets.IncludeExpression, stylesheets.FontUrlExpression ]) self._processor.parse()", "\"--remove-intertag-spaces\" ], stdout=subprocess.PIPE, stdin=subprocess.PIPE ) out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path)", "path, lang=None): super(FontAsset, self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.eot', '.svg', '.ttf', '.woff']", "'r', encoding='utf-8') as f: self._data = f.read() def save(self, path): if not os.path.exists(os.path.dirname(path)):", "if os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path = self._get_target_path() self._compile(target_path) if cache_entry: cache_entry.target = target_path self._tool_cache.update(cache_entry)", "self.load() self._processor = ExpressionProcessor(self, [ scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression ]) self._processor.parse() def minify(self):", "= os.path.splitext(path)[1] for asset_class in asset_classes: if file_ext in asset_class.supported_extensions(): langs = asset_class.get_languages(settings)", "assets...') for asset in self._assets: asset.compile(force=self._settings.force) print('Build done.') class DependencyResolver(object): @staticmethod def topological_sort(assets_unsorted):", "settings) if type(res) is list: for asset in res: 
self._assets.append(asset) self._assets[-1]._collection = self", "target_file = os.path.join(temp_path, \"target.js\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"js\",", "= os.path.join(temp_path, \"source.coffee\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"source.js\") proc = subprocess.Popen( [", "for path in file_list: res = get_asset_objects(path, settings) if type(res) is list: for", "html import subprocess import tempfile class AssetCollection(object): def __init__(self, file_list, settings): self._assets =", "scripts, html import subprocess import tempfile class AssetCollection(object): def __init__(self, file_list, settings): self._assets", "class AssetCollection(object): def __init__(self, file_list, settings): self._assets = [] self._settings = settings for", "import os from .cache import Cache from .models import CacheEntry from .utils import", "_get_source_dir(self): return self._settings.html.source def _get_target_dir(self): return self._settings.html.target def _get_target_path(self): return self.get_target_path(lang=self._lang) def _parse(self):", "is list: for asset in res: self._assets.append(asset) self._assets[-1]._collection = self self._assets[-1]._settings = settings", "def _get_target_dir(self): return self._settings.html.target def _get_target_path(self): return self.get_target_path(lang=self._lang) def _parse(self): self.load() self._processor =", "html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file", "@staticmethod def get_languages(settings): return settings.fonts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'fonts') def _get_source_dir(self): return", "settings for path in file_list: res = get_asset_objects(path, settings) if type(res) is list:", 
"{asset}'.format(asset=self)) else: cache_entry = CacheEntry(self._path, target_path, self._lang) self._tool_cache.add(cache_entry) print('Created {asset}'.format(asset=self)) self._flag_modified = True", "lang=None): dependency = self._collection.find_asset(path, lang) if dependency: if dependency not in self._dependencies: self._dependencies.append(dependency)", "__eq__(self, other): return self._path == other._path and self._lang == other._lang def __ne__(self, other):", "not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path, self._data) class StylesheetAsset(TextAsset): @staticmethod def supported_extensions(): return ['.css', '.scss']", "assets_sorted class Asset(object): FILE = 0 STRING = 1 def __init__(self, resource_type, path,", "= resource_type self._path = path self._lang = lang self._collection = None self._settings =", "\"source.html\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.html\") proc = subprocess.Popen( [ \"java\", \"-jar\",", "'%s%s' % (new_filename, parts[1]) if 'change_extension' in opts: new_ext = opts['change_extension'] parts =", "dependency in asset._dependencies: if dependency in assets_unsorted: break else: acyclic = True assets_unsorted.remove(asset)", "cache_entry = self._tool_cache.find_entry(self._path, self._lang) file_modified = True if cache_entry is None\\ else cache_entry.file_modified()", "= split[1] def load(self): with io.open(self._path, 'r', encoding='utf-8') as f: self._data = f.read()", ") out, err = proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def compile_coffee(self): temp_path =", "self._basename = split[0] self._extension = split[1] def load(self): with io.open(self._path, 'r', encoding='utf-8') as", "= os.path.join(temp_path, \"target.html\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.htmlcompressor_file, \"--type\", \"html\", \"--mask\",", "= os.path.join(self._get_partials_dir(), 
path_part) else: target_path = os.path.join(self._get_target_dir(), path_part) return target_path def __repr__(self): if", "asset._lang == lang: return asset return None def pick_dependencies(self): print('Found {count:d} assets'.format(count=len(self._assets))) if", "def supported_extensions(): return ['.js', '.coffee'] @staticmethod def get_languages(settings): return settings.scripts.languages def _get_partials_dir(self): return", "= 1 def __init__(self, resource_type, path, lang): self._resource_type = resource_type self._path = path", "self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class HtmlAsset(TextAsset): @staticmethod def supported_extensions(): return", "else: if self._settings.verbose: print('Cached {asset}'.format(asset=self)) else: print(\"String asset\") class TextAsset(Asset): def __init__(self, path,", "= DependencyResolver.topological_sort(self._assets) if self._settings.verbose: print('Build order:\\n{collection}\\n'.format( collection=self._assets)) def build(self): print('Building assets...') for asset", "load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._settings.minify and not self.is_partial(target_path): if", "os.path.join(self._settings.partials, 'images') def _get_source_dir(self): return self._settings.images.source def _get_target_dir(self): return self._settings.images.target class FontAsset(BinaryAsset): def", "import io from .compiler import ExpressionProcessor from .expressions import stylesheets, scripts, html import", "asset.compile(force=self._settings.force) print('Build done.') class DependencyResolver(object): @staticmethod def topological_sort(assets_unsorted): assets_sorted = [] while len(assets_unsorted)", "proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.htmlcompressor_file, \"--type\", \"html\", \"--mask\", \"*.html\", \"-o\", 
target_file,", "= settings for path in file_list: res = get_asset_objects(path, settings) if type(res) is", "os.path.commonprefix([ self._path, self._get_source_dir()]) path_part = self._path[len(common_prefix)+1:] if 'hash' in opts: parts = os.path.splitext(path_part)", "assets_sorted.append(asset) if not acyclic: raise RuntimeError('A cyclic dependency occurred') return assets_sorted class Asset(object):", "self).__init__(path, lang) @staticmethod def supported_extensions(): return ['.png', '.jpg', '.gif'] @staticmethod def get_languages(settings): return", "for asset in self._assets: asset.compile(force=self._settings.force) print('Build done.') class DependencyResolver(object): @staticmethod def topological_sort(assets_unsorted): assets_sorted", "def dependencies_modified(self): for dep_asset in self._dependencies: if dep_asset._flag_modified: return True return False def", "shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._settings.minify and not self.is_partial(target_path): if self._settings.verbose:", "def supported_extensions(): return ['.html'] @staticmethod def get_languages(settings): return settings.html.languages def _get_partials_dir(self): return os.path.join(self._settings.partials,", "= target_path self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self)) else: cache_entry = CacheEntry(self._path, target_path, self._lang) self._tool_cache.add(cache_entry) print('Created", "target_path = self._get_target_path() self._compile(target_path) if cache_entry: cache_entry.target = target_path self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self)) else:", "asset._path == path and asset._lang == lang: return asset return None def pick_dependencies(self):", "1 def __init__(self, resource_type, path, lang): self._resource_type = resource_type self._path = path self._lang", "self._tool_cache = Cache() self._flag_modified = False def 
is_partial(self, path): return os.path.basename(path).startswith(\"_\") def get_target_path(self,", "self._settings.scripts.target def _get_target_path(self): return self.get_target_path( hash=get_file_hash(self._path, unique=True), change_extension='.js' ) def _parse(self): self.load() self._processor", "def supported_extensions(): return ['.css', '.scss'] @staticmethod def get_languages(settings): return settings.stylesheets.languages def _get_partials_dir(self): return", "import CacheEntry from .utils import get_file_hash, save_file, load_file import shutil import io from", "parts = os.path.splitext(path_part) path_part = '%s%s' % (parts[0], new_ext) if 'lang' in opts", "target_file = os.path.join(temp_path, \"target.css\") proc = subprocess.Popen( [ \"java\", \"-Xss100m\", \"-jar\", self._settings.yuicompressor_file, \"--type\",", "if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) save_file(path, self._data) class StylesheetAsset(TextAsset): @staticmethod def supported_extensions(): return ['.css',", ".models import CacheEntry from .utils import get_file_hash, save_file, load_file import shutil import io", "save_file(path, self._data) class StylesheetAsset(TextAsset): @staticmethod def supported_extensions(): return ['.css', '.scss'] @staticmethod def get_languages(settings):", "os.path.join(temp_path, \"target.js\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"js\", \"-o\", target_file,", "def _get_source_dir(self): return self._settings.html.source def _get_target_dir(self): return self._settings.html.target def _get_target_path(self): return self.get_target_path(lang=self._lang) def", "or force: if cache_entry: if os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path = self._get_target_path() self._compile(target_path) if cache_entry:", "class Asset(object): FILE = 0 STRING = 1 def __init__(self, resource_type, path, lang):", "if 'lang' in opts and 
not(opts['lang'] is None): lang = opts['lang'] parts =", "'lang' in opts and not(opts['lang'] is None): lang = opts['lang'] parts = os.path.splitext(path_part)", "== '.coffee': if self._settings.verbose: print('Using CoffeeScript Compiler for {asset}'.format(asset=self)) self.compile_coffee() if self._settings.minify and", "['.html'] @staticmethod def get_languages(settings): return settings.html.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'html') def _get_source_dir(self):", "\"-jar\", self._settings.yuicompressor_file, \"--type\", \"css\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err", "Cache from .models import CacheEntry from .utils import get_file_hash, save_file, load_file import shutil", "_get_source_dir(self): return self._settings.fonts.source def _get_target_dir(self): return self._settings.fonts.target def get_asset_objects(path, settings): asset_classes = [", "break else: acyclic = True assets_unsorted.remove(asset) assets_sorted.append(asset) if not acyclic: raise RuntimeError('A cyclic", "self._get_target_path() self._compile(target_path) if cache_entry: cache_entry.target = target_path self._tool_cache.update(cache_entry) print('Updated {asset}'.format(asset=self)) else: cache_entry =", "asset in self._assets: asset.compile(force=self._settings.force) print('Build done.') class DependencyResolver(object): @staticmethod def topological_sort(assets_unsorted): assets_sorted =", "[ scripts.IncludeExpression, scripts.ScriptUrlExpression, scripts.AppConfExpression, scripts.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file", "return self._settings.html.target def _get_target_path(self): return self.get_target_path(lang=self._lang) def _parse(self): self.load() self._processor = ExpressionProcessor(self, [", "self._settings.html.target def _get_target_path(self): return self.get_target_path(lang=self._lang) def 
_parse(self): self.load() self._processor = ExpressionProcessor(self, [ html.IncludeExpression,", "save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.html\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.htmlcompressor_file,", "get_languages(settings): return settings.fonts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'fonts') def _get_source_dir(self): return self._settings.fonts.source def", "not self.is_partial(target_path): if self._settings.verbose: print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class BinaryAsset(Asset): def __init__(self, path,", "html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path = tempfile.mkdtemp() source_file =", "if file_modified or force: if cache_entry: if os.path.exists(cache_entry.target): os.remove(cache_entry.target) target_path = self._get_target_path() self._compile(target_path)", "HtmlAsset(TextAsset): @staticmethod def supported_extensions(): return ['.html'] @staticmethod def get_languages(settings): return settings.html.languages def _get_partials_dir(self):", "in self._dependencies: self._dependencies.append(dependency) else: print(\"Couldn't find dependency with path %s\" % path) def", "'.ttf', '.woff'] @staticmethod def get_languages(settings): return settings.fonts.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'fonts') def", "\"-Xss100m\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"css\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out,", "if dependency not in self._dependencies: self._dependencies.append(dependency) else: print(\"Couldn't find dependency with path %s\"", "asset_class.supported_extensions(): langs = asset_class.get_languages(settings) if langs is None: return asset_class(path, None) else: 
return", "print('Minifying {asset}'.format(asset=self)) self.minify() self.save(target_path) class ScriptAsset(TextAsset): @staticmethod def supported_extensions(): return ['.js', '.coffee'] @staticmethod", "def get_target_path(self, **opts): common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) path_part = self._path[len(common_prefix)+1:] if 'hash'", "return os.path.basename(path).startswith(\"_\") def get_target_path(self, **opts): common_prefix = os.path.commonprefix([ self._path, self._get_source_dir()]) path_part = self._path[len(common_prefix)+1:]", "self._path == other._path and self._lang == other._lang def __ne__(self, other): return self._path !=", "self._settings.verbose: print('Build order:\\n{collection}\\n'.format( collection=self._assets)) def build(self): print('Building assets...') for asset in self._assets: asset.compile(force=self._settings.force)", "def find_asset(self, path, lang): for asset in self._assets: if asset._path == path and", "lang): self._resource_type = resource_type self._path = path self._lang = lang self._collection = None", "= asset_class.get_languages(settings) if langs is None: return asset_class(path, None) else: return [asset_class(path, lang)", "return asset_class(path, None) else: return [asset_class(path, lang) for lang in langs] return None", "self._get_source_dir()]) return t.format(path=self._path[len(common_prefix) + 1:], lang=self._lang) def add_dependency(self, path, lang=None): dependency = self._collection.find_asset(path,", "!= other._lang def parse(self): self._parse() def dependencies_modified(self): for dep_asset in self._dependencies: if dep_asset._flag_modified:", "if self._settings.verbose: print(asset) print('Dependencies {dependencies}\\n'.format( dependencies=asset._dependencies)) self._assets = DependencyResolver.topological_sort(self._assets) if self._settings.verbose: print('Build order:\\n{collection}\\n'.format(", "% (new_filename, parts[1]) if 'change_extension' in opts: 
new_ext = opts['change_extension'] parts = os.path.splitext(path_part)", "os.path.splitext(path) self._basename = split[0] self._extension = split[1] def load(self): with io.open(self._path, 'r', encoding='utf-8')", "tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.css\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.css\") proc =", "settings else: if res is None: continue self._assets.append(res) self._assets[-1]._collection = self self._assets[-1]._settings =", "self.get_target_path(hash=get_file_hash(self._path, unique=True)) def _parse(self): pass def _compile(self, target_path): if not os.path.exists(os.path.dirname(target_path)): os.makedirs(os.path.dirname(target_path)) shutil.copy(self._path,", "proc.communicate() self._data = load_file(target_file) shutil.rmtree(temp_path) def _compile(self, target_path): self._processor.compile(self._settings, target_path) if self._extension ==", "HtmlAsset, ScriptAsset ] file_ext = os.path.splitext(path)[1] for asset_class in asset_classes: if file_ext in", "self._parse() def dependencies_modified(self): for dep_asset in self._dependencies: if dep_asset._flag_modified: return True return False", "shutil.rmtree(temp_path) def compile_coffee(self): temp_path = tempfile.mkdtemp() source_file = os.path.join(temp_path, \"source.coffee\") save_file(source_file, self._data) target_file", "if 'hash' in opts: parts = os.path.splitext(path_part) new_filename = '%s-%s' % (parts[0], opts['hash'])", "return t.format(path=self._path[len(common_prefix) + 1:], lang=self._lang) def add_dependency(self, path, lang=None): dependency = self._collection.find_asset(path, lang)", "False for asset in assets_unsorted: for dependency in asset._dependencies: if dependency in assets_unsorted:", "\"source.css\") save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.css\") proc = subprocess.Popen( [ \"java\", \"-Xss100m\",", "type(res) is list: for asset in res: 
self._assets.append(asset) self._assets[-1]._collection = self self._assets[-1]._settings =", "{asset}'.format(asset=self)) self.minify() self.save(target_path) class HtmlAsset(TextAsset): @staticmethod def supported_extensions(): return ['.html'] @staticmethod def get_languages(settings):", "settings.stylesheets.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'stylesheets') def _get_source_dir(self): return self._settings.stylesheets.source def _get_target_dir(self): return", "[ html.IncludeExpression, html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression ]) self._processor.parse() def minify(self):", "{count:d} assets'.format(count=len(self._assets))) if self._settings.verbose: print(\"Picking dependencies...\") for asset in self._assets: asset.parse() if self._settings.verbose:", "_get_source_dir(self): return self._settings.scripts.source def _get_target_dir(self): return self._settings.scripts.target def _get_target_path(self): return self.get_target_path( hash=get_file_hash(self._path, unique=True),", "print('Cached {asset}'.format(asset=self)) else: print(\"String asset\") class TextAsset(Asset): def __init__(self, path, lang=None): super(TextAsset, self).__init__(Asset.FILE,", "self.save(target_path) class ScriptAsset(TextAsset): @staticmethod def supported_extensions(): return ['.js', '.coffee'] @staticmethod def get_languages(settings): return", "[ \"java\", \"-jar\", self._settings.yuicompressor_file, \"--type\", \"js\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, )", "import subprocess import tempfile class AssetCollection(object): def __init__(self, file_list, settings): self._assets = []", "self._settings.html.source def _get_target_dir(self): return self._settings.html.target def _get_target_path(self): return self.get_target_path(lang=self._lang) def 
_parse(self): self.load() self._processor", "@staticmethod def get_languages(settings): return settings.html.languages def _get_partials_dir(self): return os.path.join(self._settings.partials, 'html') def _get_source_dir(self): return", "html.StylesheetUrlExpression, html.ScriptUrlExpression, html.ImageUrlExpression, html.AppConfExpression, html.I18nExpression, html.I18nTemplateExpression, html.ResourceUrlExpression ]) self._processor.parse() def minify(self): temp_path =", "opts['hash']) path_part = '%s%s' % (new_filename, parts[1]) if 'change_extension' in opts: new_ext =", "1:], lang=self._lang) def add_dependency(self, path, lang=None): dependency = self._collection.find_asset(path, lang) if dependency: if", "self._dependencies.append(dependency) else: print(\"Couldn't find dependency with path %s\" % path) def __eq__(self, other):", "if langs is None: return asset_class(path, None) else: return [asset_class(path, lang) for lang", "save_file(source_file, self._data) target_file = os.path.join(temp_path, \"target.js\") proc = subprocess.Popen( [ \"java\", \"-jar\", self._settings.yuicompressor_file,", "\"-jar\", self._settings.yuicompressor_file, \"--type\", \"js\", \"-o\", target_file, source_file ], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) out, err", "[] self._tool_cache = Cache() self._flag_modified = False def is_partial(self, path): return os.path.basename(path).startswith(\"_\") def", "import stylesheets, scripts, html import subprocess import tempfile class AssetCollection(object): def __init__(self, file_list,", "= self._path[len(common_prefix)+1:] if 'hash' in opts: parts = os.path.splitext(path_part) new_filename = '%s-%s' %", "path_part) return target_path def __repr__(self): if self._lang is None: t = '{path}' else:", "FILE = 0 STRING = 1 def __init__(self, resource_type, path, lang): self._resource_type =" ]
[]
[ "= [a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha, repeat=7)] with open('motifs7.txt', 'w') as f: for", "itertools import product alpha = ['A', 'T', 'C', 'G'] motifs = [a+b+c+d+e+f+g for", "['A', 'T', 'C', 'G'] motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha, repeat=7)] with", "motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha, repeat=7)] with open('motifs7.txt', 'w') as f:", "[a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha, repeat=7)] with open('motifs7.txt', 'w') as f: for item", "from itertools import product alpha = ['A', 'T', 'C', 'G'] motifs = [a+b+c+d+e+f+g", "import product alpha = ['A', 'T', 'C', 'G'] motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g", "'G'] motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha, repeat=7)] with open('motifs7.txt', 'w') as", "= ['A', 'T', 'C', 'G'] motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha, repeat=7)]", "for a,b,c,d,e,f,g in product(alpha, repeat=7)] with open('motifs7.txt', 'w') as f: for item in", "in product(alpha, repeat=7)] with open('motifs7.txt', 'w') as f: for item in motifs: f.write(\"%s\\n\"", "repeat=7)] with open('motifs7.txt', 'w') as f: for item in motifs: f.write(\"%s\\n\" % item)", "product(alpha, repeat=7)] with open('motifs7.txt', 'w') as f: for item in motifs: f.write(\"%s\\n\" %", "'C', 'G'] motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha, repeat=7)] with open('motifs7.txt', 'w')", "alpha = ['A', 'T', 'C', 'G'] motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha,", "a,b,c,d,e,f,g in product(alpha, repeat=7)] with open('motifs7.txt', 'w') as f: for item in motifs:", "'T', 'C', 'G'] motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g in product(alpha, repeat=7)] with open('motifs7.txt',", "product alpha = ['A', 'T', 'C', 'G'] motifs = [a+b+c+d+e+f+g for a,b,c,d,e,f,g in" ]
[ "# parallel for loop def parfor(my_function, my_inputs): # evaluate function in parallel, and", "loop def parfor(my_function, my_inputs): # evaluate function in parallel, and collect the results", "import csv import time args = sys.argv def err(msg): print(\"Error:\", msg) sys.exit(1) #", "os import sys import csv import time args = sys.argv def err(msg): print(\"Error:\",", "sys.argv def err(msg): print(\"Error:\", msg) sys.exit(1) # parallel for loop def parfor(my_function, my_inputs):", "def err(msg): print(\"Error:\", msg) sys.exit(1) # parallel for loop def parfor(my_function, my_inputs): #", "and collect the results import multiprocessing as mp pool = mp.Pool(mp.cpu_count()) result =", "def parfor(my_function, my_inputs): # evaluate function in parallel, and collect the results import", "the results import multiprocessing as mp pool = mp.Pool(mp.cpu_count()) result = pool.map(my_function, my_inputs)", "= sys.argv def err(msg): print(\"Error:\", msg) sys.exit(1) # parallel for loop def parfor(my_function,", "args = sys.argv def err(msg): print(\"Error:\", msg) sys.exit(1) # parallel for loop def", "<filename>bak/extract/misc.py import os import sys import csv import time args = sys.argv def", "csv import time args = sys.argv def err(msg): print(\"Error:\", msg) sys.exit(1) # parallel", "msg) sys.exit(1) # parallel for loop def parfor(my_function, my_inputs): # evaluate function in", "sys.exit(1) # parallel for loop def parfor(my_function, my_inputs): # evaluate function in parallel,", "parfor(my_function, my_inputs): # evaluate function in parallel, and collect the results import multiprocessing", "parallel, and collect the results import multiprocessing as mp pool = mp.Pool(mp.cpu_count()) result", "sys import csv import time args = sys.argv def err(msg): print(\"Error:\", msg) sys.exit(1)", "evaluate function in parallel, and collect the results import multiprocessing as mp pool", "collect the results import multiprocessing as mp pool = 
mp.Pool(mp.cpu_count()) result = pool.map(my_function,", "import os import sys import csv import time args = sys.argv def err(msg):", "err(msg): print(\"Error:\", msg) sys.exit(1) # parallel for loop def parfor(my_function, my_inputs): # evaluate", "import time args = sys.argv def err(msg): print(\"Error:\", msg) sys.exit(1) # parallel for", "results import multiprocessing as mp pool = mp.Pool(mp.cpu_count()) result = pool.map(my_function, my_inputs) return(result)", "for loop def parfor(my_function, my_inputs): # evaluate function in parallel, and collect the", "print(\"Error:\", msg) sys.exit(1) # parallel for loop def parfor(my_function, my_inputs): # evaluate function", "time args = sys.argv def err(msg): print(\"Error:\", msg) sys.exit(1) # parallel for loop", "# evaluate function in parallel, and collect the results import multiprocessing as mp", "parallel for loop def parfor(my_function, my_inputs): # evaluate function in parallel, and collect", "function in parallel, and collect the results import multiprocessing as mp pool =", "my_inputs): # evaluate function in parallel, and collect the results import multiprocessing as", "import sys import csv import time args = sys.argv def err(msg): print(\"Error:\", msg)", "in parallel, and collect the results import multiprocessing as mp pool = mp.Pool(mp.cpu_count())" ]
[ "direct self.sig = sig class Edge: def __init__(self, prev, to, tp, direct): self.prev", "= direct self.sig = sig class Edge: def __init__(self, prev, to, tp, direct):", "nodes = [f\"{i}\" for i in range(1, 13)] nodes.extend([chr(i) for i in range(1,", "= name self.direct = direct self.sig = sig class Edge: def __init__(self, prev,", "13)] nodes.extend([chr(i) for i in range(1, 13)]) graph.add_nodes_from([]) class Node: def __init__(self, name,", "tp, direct): self.prev = prev self.to = to self.tp = tp self.direct =", "Edge: def __init__(self, prev, to, tp, direct): self.prev = prev self.to = to", "__init__(self, prev, to, tp, direct): self.prev = prev self.to = to self.tp =", "13)]) graph.add_nodes_from([]) class Node: def __init__(self, name, direct, sig): self.name = name self.direct", "= [f\"{i}\" for i in range(1, 13)] nodes.extend([chr(i) for i in range(1, 13)])", "nx graph = nx.DiGraph() nodes = [f\"{i}\" for i in range(1, 13)] nodes.extend([chr(i)", "for i in range(1, 13)] nodes.extend([chr(i) for i in range(1, 13)]) graph.add_nodes_from([]) class", "direct): self.prev = prev self.to = to self.tp = tp self.direct = direct", "sig class Edge: def __init__(self, prev, to, tp, direct): self.prev = prev self.to", "def __init__(self, prev, to, tp, direct): self.prev = prev self.to = to self.tp", "graph.add_nodes_from([]) class Node: def __init__(self, name, direct, sig): self.name = name self.direct =", "as nx graph = nx.DiGraph() nodes = [f\"{i}\" for i in range(1, 13)]", "direct, sig): self.name = name self.direct = direct self.sig = sig class Edge:", "for i in range(1, 13)]) graph.add_nodes_from([]) class Node: def __init__(self, name, direct, sig):", "in range(1, 13)] nodes.extend([chr(i) for i in range(1, 13)]) graph.add_nodes_from([]) class Node: def", "range(1, 13)]) graph.add_nodes_from([]) class Node: def __init__(self, name, direct, sig): self.name = name", "= sig class Edge: def __init__(self, prev, to, tp, direct): self.prev = prev", "import 
networkx as nx graph = nx.DiGraph() nodes = [f\"{i}\" for i in", "i in range(1, 13)]) graph.add_nodes_from([]) class Node: def __init__(self, name, direct, sig): self.name", "[f\"{i}\" for i in range(1, 13)] nodes.extend([chr(i) for i in range(1, 13)]) graph.add_nodes_from([])", "name, direct, sig): self.name = name self.direct = direct self.sig = sig class", "in range(1, 13)]) graph.add_nodes_from([]) class Node: def __init__(self, name, direct, sig): self.name =", "graph = nx.DiGraph() nodes = [f\"{i}\" for i in range(1, 13)] nodes.extend([chr(i) for", "sig): self.name = name self.direct = direct self.sig = sig class Edge: def", "= nx.DiGraph() nodes = [f\"{i}\" for i in range(1, 13)] nodes.extend([chr(i) for i", "to, tp, direct): self.prev = prev self.to = to self.tp = tp self.direct", "prev, to, tp, direct): self.prev = prev self.to = to self.tp = tp", "networkx as nx graph = nx.DiGraph() nodes = [f\"{i}\" for i in range(1,", "class Node: def __init__(self, name, direct, sig): self.name = name self.direct = direct", "self.name = name self.direct = direct self.sig = sig class Edge: def __init__(self,", "name self.direct = direct self.sig = sig class Edge: def __init__(self, prev, to,", "nx.DiGraph() nodes = [f\"{i}\" for i in range(1, 13)] nodes.extend([chr(i) for i in", "def __init__(self, name, direct, sig): self.name = name self.direct = direct self.sig =", "i in range(1, 13)] nodes.extend([chr(i) for i in range(1, 13)]) graph.add_nodes_from([]) class Node:", "Node: def __init__(self, name, direct, sig): self.name = name self.direct = direct self.sig", "class Edge: def __init__(self, prev, to, tp, direct): self.prev = prev self.to =", "nodes.extend([chr(i) for i in range(1, 13)]) graph.add_nodes_from([]) class Node: def __init__(self, name, direct,", "__init__(self, name, direct, sig): self.name = name self.direct = direct self.sig = sig", "self.direct = direct self.sig = sig class Edge: def __init__(self, prev, to, tp,", "self.sig = sig class Edge: def 
__init__(self, prev, to, tp, direct): self.prev =", "range(1, 13)] nodes.extend([chr(i) for i in range(1, 13)]) graph.add_nodes_from([]) class Node: def __init__(self," ]
[ "from django.test import TestCase # 3rd party imports # project imports from djangoflutterwave.tests.factories", "expected_response = ( '{\"tx_ref\": \"txref\"' ', \"redirect_url\": \"test\", \"public_key\": \"test\"}' ) actual_response =", "stdlib imports from unittest.mock import patch # django imports from django.test import TestCase", "mock_reverse, mock_settings, mock_create_transaction_ref ): \"\"\"Ensure a json string is returned containing the correct", "plan = FlwPlanModelFactory() user = UserFactory() expected_response = ( '{\"tx_ref\": \"txref\"' ', \"redirect_url\":", "= UserFactory() expected_response = ( '{\"tx_ref\": \"txref\"' ', \"redirect_url\": \"test\", \"public_key\": \"test\"}' )", "json string is returned containing the correct tx_ref, public_key and redirect_url\"\"\" mock_reverse.return_value =", "party imports # project imports from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags import", "for template tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params( self, mock_reverse, mock_settings,", "TestCase # 3rd party imports # project imports from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory", "import patch # django imports from django.test import TestCase # 3rd party imports", "imports # project imports from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params", "def test_pay_button_params( self, mock_reverse, mock_settings, mock_create_transaction_ref ): \"\"\"Ensure a json string is returned", "\"test\" mock_create_transaction_ref.return_value = \"txref\" plan = FlwPlanModelFactory() user = 
UserFactory() expected_response = (", "= \"test\" mock_create_transaction_ref.return_value = \"txref\" plan = FlwPlanModelFactory() user = UserFactory() expected_response =", "mock_create_transaction_ref ): \"\"\"Ensure a json string is returned containing the correct tx_ref, public_key", "from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params class TestTemplateTags(TestCase): \"\"\"Test suite", "\"test\" mock_settings.FLW_PUBLIC_KEY = \"test\" mock_create_transaction_ref.return_value = \"txref\" plan = FlwPlanModelFactory() user = UserFactory()", ") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params( self, mock_reverse, mock_settings, mock_create_transaction_ref ): \"\"\"Ensure a json", "mock_settings, mock_create_transaction_ref ): \"\"\"Ensure a json string is returned containing the correct tx_ref,", "mock_create_transaction_ref.return_value = \"txref\" plan = FlwPlanModelFactory() user = UserFactory() expected_response = ( '{\"tx_ref\":", "django.test import TestCase # 3rd party imports # project imports from djangoflutterwave.tests.factories import", "from unittest.mock import patch # django imports from django.test import TestCase # 3rd", "from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params class TestTemplateTags(TestCase): \"\"\"Test suite for template tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\"", "returned containing the correct tx_ref, public_key and redirect_url\"\"\" mock_reverse.return_value = \"test\" mock_settings.FLW_PUBLIC_KEY =", "', \"redirect_url\": \"test\", \"public_key\": \"test\"}' ) actual_response = pay_button_params(user_pk=user.pk, plan_pk=plan.pk) mock_reverse.assert_called() self.assertEqual(expected_response, 
actual_response)", "# project imports from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params class", "tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params( self, mock_reverse, mock_settings, mock_create_transaction_ref ):", "mock_reverse.return_value = \"test\" mock_settings.FLW_PUBLIC_KEY = \"test\" mock_create_transaction_ref.return_value = \"txref\" plan = FlwPlanModelFactory() user", "self, mock_reverse, mock_settings, mock_create_transaction_ref ): \"\"\"Ensure a json string is returned containing the", "project imports from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params class TestTemplateTags(TestCase):", "\"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params( self, mock_reverse, mock_settings, mock_create_transaction_ref ): \"\"\"Ensure a", "template tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params( self, mock_reverse, mock_settings, mock_create_transaction_ref", "import TestCase # 3rd party imports # project imports from djangoflutterwave.tests.factories import FlwPlanModelFactory,", "test_pay_button_params( self, mock_reverse, mock_settings, mock_create_transaction_ref ): \"\"\"Ensure a json 
string is returned containing", "imports from django.test import TestCase # 3rd party imports # project imports from", "the correct tx_ref, public_key and redirect_url\"\"\" mock_reverse.return_value = \"test\" mock_settings.FLW_PUBLIC_KEY = \"test\" mock_create_transaction_ref.return_value", "and redirect_url\"\"\" mock_reverse.return_value = \"test\" mock_settings.FLW_PUBLIC_KEY = \"test\" mock_create_transaction_ref.return_value = \"txref\" plan =", "( '{\"tx_ref\": \"txref\"' ', \"redirect_url\": \"test\", \"public_key\": \"test\"}' ) actual_response = pay_button_params(user_pk=user.pk, plan_pk=plan.pk)", "@patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params( self, mock_reverse, mock_settings, mock_create_transaction_ref ): \"\"\"Ensure a json string is", "djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params class TestTemplateTags(TestCase): \"\"\"Test suite for template tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" )", "FlwPlanModelFactory, UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params class TestTemplateTags(TestCase): \"\"\"Test suite for template tags\"\"\"", "\"txref\"' ', \"redirect_url\": \"test\", \"public_key\": \"test\"}' ) actual_response = pay_button_params(user_pk=user.pk, plan_pk=plan.pk) mock_reverse.assert_called() self.assertEqual(expected_response,", "@patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params( self, mock_reverse, mock_settings, mock_create_transaction_ref ): \"\"\"Ensure a json string", "\"\"\"Test suite for template tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") 
@patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params( self,", "UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params class TestTemplateTags(TestCase): \"\"\"Test suite for template tags\"\"\" @patch(", "patch # django imports from django.test import TestCase # 3rd party imports #", "pay_button_params class TestTemplateTags(TestCase): \"\"\"Test suite for template tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\")", "redirect_url\"\"\" mock_reverse.return_value = \"test\" mock_settings.FLW_PUBLIC_KEY = \"test\" mock_create_transaction_ref.return_value = \"txref\" plan = FlwPlanModelFactory()", "djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params class TestTemplateTags(TestCase): \"\"\"Test suite for", "= \"txref\" plan = FlwPlanModelFactory() user = UserFactory() expected_response = ( '{\"tx_ref\": \"txref\"'", "string is returned containing the correct tx_ref, public_key and redirect_url\"\"\" mock_reverse.return_value = \"test\"", "is returned containing the correct tx_ref, public_key and redirect_url\"\"\" mock_reverse.return_value = \"test\" mock_settings.FLW_PUBLIC_KEY", "): \"\"\"Ensure a json string is returned containing the correct tx_ref, public_key and", "# django imports from django.test import TestCase # 3rd party imports # project", "a json string is returned containing the correct tx_ref, public_key and redirect_url\"\"\" mock_reverse.return_value", "import FlwPlanModelFactory, UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params class TestTemplateTags(TestCase): \"\"\"Test suite for template", "FlwPlanModelFactory() 
user = UserFactory() expected_response = ( '{\"tx_ref\": \"txref\"' ', \"redirect_url\": \"test\", \"public_key\":", "3rd party imports # project imports from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags", "public_key and redirect_url\"\"\" mock_reverse.return_value = \"test\" mock_settings.FLW_PUBLIC_KEY = \"test\" mock_create_transaction_ref.return_value = \"txref\" plan", "correct tx_ref, public_key and redirect_url\"\"\" mock_reverse.return_value = \"test\" mock_settings.FLW_PUBLIC_KEY = \"test\" mock_create_transaction_ref.return_value =", "<reponame>bdelate/django-flutterwave # stdlib imports from unittest.mock import patch # django imports from django.test", "containing the correct tx_ref, public_key and redirect_url\"\"\" mock_reverse.return_value = \"test\" mock_settings.FLW_PUBLIC_KEY = \"test\"", "= FlwPlanModelFactory() user = UserFactory() expected_response = ( '{\"tx_ref\": \"txref\"' ', \"redirect_url\": \"test\",", "class TestTemplateTags(TestCase): \"\"\"Test suite for template tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def", "\"txref\" plan = FlwPlanModelFactory() user = UserFactory() expected_response = ( '{\"tx_ref\": \"txref\"' ',", "@patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params( self, mock_reverse, mock_settings, mock_create_transaction_ref ): \"\"\"Ensure", "TestTemplateTags(TestCase): \"\"\"Test suite for template tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) 
@patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params(", "user = UserFactory() expected_response = ( '{\"tx_ref\": \"txref\"' ', \"redirect_url\": \"test\", \"public_key\": \"test\"}'", "django imports from django.test import TestCase # 3rd party imports # project imports", "imports from unittest.mock import patch # django imports from django.test import TestCase #", "unittest.mock import patch # django imports from django.test import TestCase # 3rd party", "import pay_button_params class TestTemplateTags(TestCase): \"\"\"Test suite for template tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\")", "= \"test\" mock_settings.FLW_PUBLIC_KEY = \"test\" mock_create_transaction_ref.return_value = \"txref\" plan = FlwPlanModelFactory() user =", "suite for template tags\"\"\" @patch( \"djangoflutterwave.templatetags.djangoflutterwave_tags.create_transaction_ref\" ) @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.settings\") @patch(\"djangoflutterwave.templatetags.djangoflutterwave_tags.reverse\") def test_pay_button_params( self, mock_reverse,", "\"\"\"Ensure a json string is returned containing the correct tx_ref, public_key and redirect_url\"\"\"", "'{\"tx_ref\": \"txref\"' ', \"redirect_url\": \"test\", \"public_key\": \"test\"}' ) actual_response = pay_button_params(user_pk=user.pk, plan_pk=plan.pk) mock_reverse.assert_called()", "= ( '{\"tx_ref\": \"txref\"' ', \"redirect_url\": \"test\", \"public_key\": \"test\"}' ) actual_response = pay_button_params(user_pk=user.pk,", "tx_ref, public_key and redirect_url\"\"\" mock_reverse.return_value = \"test\" mock_settings.FLW_PUBLIC_KEY = \"test\" mock_create_transaction_ref.return_value = \"txref\"", "# stdlib imports from unittest.mock import patch # django imports 
from django.test import", "# 3rd party imports # project imports from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory from", "imports from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory from djangoflutterwave.templatetags.djangoflutterwave_tags import pay_button_params class TestTemplateTags(TestCase): \"\"\"Test", "mock_settings.FLW_PUBLIC_KEY = \"test\" mock_create_transaction_ref.return_value = \"txref\" plan = FlwPlanModelFactory() user = UserFactory() expected_response", "UserFactory() expected_response = ( '{\"tx_ref\": \"txref\"' ', \"redirect_url\": \"test\", \"public_key\": \"test\"}' ) actual_response" ]
[]
[ "is not None: result.append(node._element) node = node._next self.assertEqual([1, 2, 3, 4, 5], result)", "return head._next class MyTestCase(unittest.TestCase): def test_something(self): head = Node(0, None) current = head", "is None: raise ValueError(\"Linked list is empty\") return head._next class MyTestCase(unittest.TestCase): def test_something(self):", "head for i in range(1, 6): new_node = Node(i, None) current._next = new_node", "second = from_second(head) result = [] node = second while node is not", "None: raise ValueError(\"Linked list is empty\") return head._next class MyTestCase(unittest.TestCase): def test_something(self): head", "while node is not None: result.append(node._element) node = node._next self.assertEqual([1, 2, 3, 4,", "second while node is not None: result.append(node._element) node = node._next self.assertEqual([1, 2, 3,", "current = head for i in range(1, 6): new_node = Node(i, None) current._next", "Node(0, None) current = head for i in range(1, 6): new_node = Node(i,", "= head for i in range(1, 6): new_node = Node(i, None) current._next =", "unittest from datastructure.links.Node import Node def from_second(head): if head is None: raise ValueError(\"Linked", "= Node(0, None) current = head for i in range(1, 6): new_node =", "= new_node second = from_second(head) result = [] node = second while node", "= second while node is not None: result.append(node._element) node = node._next self.assertEqual([1, 2,", "current._next = new_node current = new_node second = from_second(head) result = [] node", "MyTestCase(unittest.TestCase): def test_something(self): head = Node(0, None) current = head for i in", "node is not None: result.append(node._element) node = node._next self.assertEqual([1, 2, 3, 4, 5],", "ValueError(\"Linked list is empty\") return head._next class MyTestCase(unittest.TestCase): def test_something(self): head = Node(0,", "= Node(i, None) current._next = new_node current = new_node second = from_second(head) result", "[] 
node = second while node is not None: result.append(node._element) node = node._next", "range(1, 6): new_node = Node(i, None) current._next = new_node current = new_node second", "None) current = head for i in range(1, 6): new_node = Node(i, None)", "list is empty\") return head._next class MyTestCase(unittest.TestCase): def test_something(self): head = Node(0, None)", "None) current._next = new_node current = new_node second = from_second(head) result = []", "class MyTestCase(unittest.TestCase): def test_something(self): head = Node(0, None) current = head for i", "head = Node(0, None) current = head for i in range(1, 6): new_node", "6): new_node = Node(i, None) current._next = new_node current = new_node second =", "= [] node = second while node is not None: result.append(node._element) node =", "head is None: raise ValueError(\"Linked list is empty\") return head._next class MyTestCase(unittest.TestCase): def", "None: result.append(node._element) node = node._next self.assertEqual([1, 2, 3, 4, 5], result) if __name__", "new_node second = from_second(head) result = [] node = second while node is", "new_node current = new_node second = from_second(head) result = [] node = second", "result = [] node = second while node is not None: result.append(node._element) node", "if head is None: raise ValueError(\"Linked list is empty\") return head._next class MyTestCase(unittest.TestCase):", "import unittest from datastructure.links.Node import Node def from_second(head): if head is None: raise", "from_second(head): if head is None: raise ValueError(\"Linked list is empty\") return head._next class", "test_something(self): head = Node(0, None) current = head for i in range(1, 6):", "from datastructure.links.Node import Node def from_second(head): if head is None: raise ValueError(\"Linked list", "not None: result.append(node._element) node = node._next self.assertEqual([1, 2, 3, 4, 5], result) if", "def from_second(head): if head is None: raise ValueError(\"Linked list is 
empty\") return head._next", "i in range(1, 6): new_node = Node(i, None) current._next = new_node current =", "= new_node current = new_node second = from_second(head) result = [] node =", "new_node = Node(i, None) current._next = new_node current = new_node second = from_second(head)", "in range(1, 6): new_node = Node(i, None) current._next = new_node current = new_node", "result.append(node._element) node = node._next self.assertEqual([1, 2, 3, 4, 5], result) if __name__ ==", "= from_second(head) result = [] node = second while node is not None:", "from_second(head) result = [] node = second while node is not None: result.append(node._element)", "Node def from_second(head): if head is None: raise ValueError(\"Linked list is empty\") return", "raise ValueError(\"Linked list is empty\") return head._next class MyTestCase(unittest.TestCase): def test_something(self): head =", "for i in range(1, 6): new_node = Node(i, None) current._next = new_node current", "is empty\") return head._next class MyTestCase(unittest.TestCase): def test_something(self): head = Node(0, None) current", "node = second while node is not None: result.append(node._element) node = node._next self.assertEqual([1,", "node = node._next self.assertEqual([1, 2, 3, 4, 5], result) if __name__ == '__main__':", "= node._next self.assertEqual([1, 2, 3, 4, 5], result) if __name__ == '__main__': unittest.main()", "datastructure.links.Node import Node def from_second(head): if head is None: raise ValueError(\"Linked list is", "Node(i, None) current._next = new_node current = new_node second = from_second(head) result =", "empty\") return head._next class MyTestCase(unittest.TestCase): def test_something(self): head = Node(0, None) current =", "head._next class MyTestCase(unittest.TestCase): def test_something(self): head = Node(0, None) current = head for", "import Node def from_second(head): if head is None: raise ValueError(\"Linked list is empty\")", "current = new_node second = from_second(head) result 
= [] node = second while", "def test_something(self): head = Node(0, None) current = head for i in range(1," ]
[ "= True dependencies = [ ] operations = [ migrations.CreateModel( name='DicaModel', fields=[ ('descricao',", "] operations = [ migrations.CreateModel( name='DicaModel', fields=[ ('descricao', models.CharField(max_length=200, primary_key=True)), ], options={ 'db_table':", "dependencies = [ ] operations = [ migrations.CreateModel( name='DicaModel', fields=[ ('descricao', models.CharField(max_length=200, primary_key=True)),", "initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='DicaModel', fields=[", "django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ]", "from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [", "= [ migrations.CreateModel( name='DicaModel', fields=[ ('descricao', models.CharField(max_length=200, primary_key=True)), ], options={ 'db_table': 'dica', },", "models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [", "migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations =", "<gh_stars>1-10 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies =", "class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel(", "migrations.CreateModel( name='DicaModel', fields=[ ('descricao', models.CharField(max_length=200, primary_key=True)), ], options={ 'db_table': 'dica', }, ), ]", "[ ] operations = [ migrations.CreateModel( name='DicaModel', fields=[ ('descricao', models.CharField(max_length=200, primary_key=True)), ], options={", "Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='DicaModel',", "operations = [ migrations.CreateModel( name='DicaModel', fields=[ ('descricao', models.CharField(max_length=200, primary_key=True)), ], options={ 'db_table': 'dica',", "import migrations, 
models class Migration(migrations.Migration): initial = True dependencies = [ ] operations", "= [ ] operations = [ migrations.CreateModel( name='DicaModel', fields=[ ('descricao', models.CharField(max_length=200, primary_key=True)), ],", "[ migrations.CreateModel( name='DicaModel', fields=[ ('descricao', models.CharField(max_length=200, primary_key=True)), ], options={ 'db_table': 'dica', }, ),", "True dependencies = [ ] operations = [ migrations.CreateModel( name='DicaModel', fields=[ ('descricao', models.CharField(max_length=200," ]
[ "from layer._abstract import AbstractLayer class NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层 <-> 主机网络层 <-> 主机物理层的消息收发。 \"\"\"", "# 流量控制。 sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary), self.__phy) def should_receive(self, port: str) -> bool: \"\"\"判断本层是否应该接收某帧。", "-> int: \"\"\"向本机应用层发送消息。 Args: message: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" return self._send(message, self.__app) def", "device_id: str) -> None: \"\"\"初始化主机网络层。 Args: device_id: 该主机的设备号。 \"\"\" self.__device_id = device_id self.__app", "= device_id self.__app = f\"1{device_id}300\" self.__port = f\"1{device_id}200\" self.__phy = f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder", "dst: ACK 的目的地,即原消息的源。 Returns: 生成的 ACK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def build_nak(self,", "本机应用层传来的消息数据。 Returns: 打包的帧列表。 \"\"\" message = app_data[\"message\"] frame_num = Frame.calc_num(message) # 第一帧是请求帧。 request_frame", "= Network.RECV_TIMEOUT) -> tuple[str, bool]: \"\"\"接收来自本机物理层的消息。 Args: timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns: - [0]", "device_id self.__app = f\"1{device_id}300\" self.__port = f\"1{device_id}200\" self.__phy = f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder =", "build_nak(self, dst: str) -> Frame: \"\"\"生成 NAK 帧。 Args: dst: NAK 的目的地,即原消息的源。 Returns:", "\"\" while port != self.__app: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) return message def", "build_pool(self, app_data: dict) -> list[Frame]: \"\"\"将消息打包为帧。 Args: app_data: 本机应用层传来的消息数据。 Returns: 打包的帧列表。 \"\"\" message", "receive_from_phy(self, timeout: int = Network.RECV_TIMEOUT) -> tuple[str, bool]: \"\"\"接收来自本机物理层的消息。 Args: timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。", "== self.__app: return message, True elif port == self.__phy: return bits_to_string(message), False else:", "dst: NAK 的目的地,即原消息的源。 Returns: 生成的 NAK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK, 
dst=dst) def parse_reply(self,", "def build_ack(self, dst: str) -> Frame: \"\"\"生成 ACK 帧。 Args: dst: ACK 的目的地,即原消息的源。", "False def parse_message(self, binary: str) -> Frame: \"\"\"解析消息。 Args: binary: 含有消息的 01 字符串。", "True if response.reply_state == ReplyState.ACK else False def parse_message(self, binary: str) -> Frame:", "message: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" return self._send(message, self.__app) def send_to_phy(self, binary: str) ->", "success = self._receive(timeout=timeout) binary = bits_to_string(binary) if success else binary return binary, success", "Args: binary: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" # 流量控制。 sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary), self.__phy) def", "FrameBuilder() self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL, data=\"\", ) self.__parser = FrameParser() def __str__(self) -> str:", ": (i + 1) * FrameParam.DATA_LEN ], ) for i in range(frame_num -", "if port == self.__app: return message, True elif port == self.__phy: return bits_to_string(message),", "], ) for i in range(frame_num - 1) ] ) # 最后一帧是结束帧。 final_frame", "while True: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) if port == self.__app: return message,", "while port != self.__app: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) return message def receive_from_phy(self,", "bits_to_string(binary) if success else binary return binary, success def send_to_app(self, message: str) ->", "frame_num = Frame.calc_num(message) # 第一帧是请求帧。 request_frame = self.__normal_builder.build( session_state=SessionState.REQ_TXT if app_data[\"msgtype\"] == MessageType.TEXT", "return message, True elif port == self.__phy: return bits_to_string(message), False else: continue def", "== self.__phy: return bits_to_string(message), False else: continue def receive_from_app(self) -> str: \"\"\"接收来自本机应用层的消息。 Returns:", "success def send_to_app(self, message: str) -> int: \"\"\"向本机应用层发送消息。 Args: message: 要发送的消息。 
Returns: 总共发送的字节数。", "bool: \"\"\"解析回复。 Args: binary: 含有回复的 01 字符串。 Returns: ACK 为 `True`,NAK 为 `False`。", "\"\"\" binary, _, success = self._receive(timeout=timeout) binary = bits_to_string(binary) if success else binary", "FrameParam.DATA_LEN :], ) frame_pool.append(final_frame) return frame_pool def build_ack(self, dst: str) -> Frame: \"\"\"生成", "None: \"\"\"初始化主机网络层。 Args: device_id: 该主机的设备号。 \"\"\" self.__device_id = device_id self.__app = f\"1{device_id}300\" self.__port", "-> bool: \"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。 Returns: 应该接收为 `True`,不应该接收为 `False`。 \"\"\" return port in", "1) * FrameParam.DATA_LEN ], ) for i in range(frame_num - 1) ] )", "为 `True`,NAK 为 `False`。 \"\"\" response = self.__parser.parse(binary) return True if response.reply_state ==", "port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) if port == self.__app: return message, True elif port", "FrameParser() def __str__(self) -> str: \"\"\"打印设备号与端口号。\"\"\" return f\"[Device {self.__device_id}] <Net Layer @{self.__port}>\\n{'-'*30}\" def", "if success else binary return binary, success def send_to_app(self, message: str) -> int:", "\"\"\"将消息打包为帧。 Args: app_data: 本机应用层传来的消息数据。 Returns: 打包的帧列表。 \"\"\" message = app_data[\"message\"] frame_num = Frame.calc_num(message)", "frame_pool.append(final_frame) return frame_pool def build_ack(self, dst: str) -> Frame: \"\"\"生成 ACK 帧。 Args:", "Args: 发来的帧的目的端口号。 Returns: 应该接收为 `True`,不应该接收为 `False`。 \"\"\" return port in (self.__app, Topology.BROADCAST_PORT) def", "字符串。 Returns: ACK 为 `True`,NAK 为 `False`。 \"\"\" response = self.__parser.parse(binary) return True", "receive_all(self) -> tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns: - [0] 接收到的消息。 - [1] 本机应用层发来为 `True`,本机物理层发来为", "\"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。 \"\"\" port = \"\" while port != self.__app: message, port,", "Args: dst: NAK 的目的地,即原消息的源。 Returns: 生成的 NAK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst) def", "\"\"\"接收来自本机物理层的消息。 Args: timeout: 
可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns: - [0] 接收到的消息。 - [1] 接收成功为 `True`,接收超时为", "`False`。 \"\"\" return port in (self.__app, Topology.BROADCAST_PORT) def build_pool(self, app_data: dict) -> list[Frame]:", "- [1] 本机应用层发来为 `True`,本机物理层发来为 `False`。 \"\"\" while True: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE)", "发来的帧的目的端口号。 Returns: 应该接收为 `True`,不应该接收为 `False`。 \"\"\" return port in (self.__app, Topology.BROADCAST_PORT) def build_pool(self,", "def parse_message(self, binary: str) -> Frame: \"\"\"解析消息。 Args: binary: 含有消息的 01 字符串。 Returns:", "self.__app) def send_to_phy(self, binary: str) -> int: \"\"\"向本机物理层发送消息。 Args: binary: 要发送的消息。 Returns: 总共发送的字节数。", "<gh_stars>1-10 from time import sleep from utils.coding import * from utils.frame import *", "timeout: int = Network.RECV_TIMEOUT) -> tuple[str, bool]: \"\"\"接收来自本机物理层的消息。 Args: timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns:", "ACK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def build_nak(self, dst: str) -> Frame: \"\"\"生成", "str) -> Frame: \"\"\"生成 NAK 帧。 Args: dst: NAK 的目的地,即原消息的源。 Returns: 生成的 NAK", "ReplyState.ACK else False def parse_message(self, binary: str) -> Frame: \"\"\"解析消息。 Args: binary: 含有消息的", "Returns: 接收到的消息。 \"\"\" port = \"\" while port != self.__app: message, port, _", "= FrameBuilder() self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL, data=\"\", ) self.__parser = FrameParser() def __str__(self) ->", "生成的 ACK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def build_nak(self, dst: str) -> Frame:", "session_state=SessionState.NORMAL, data=message[ i * FrameParam.DATA_LEN : (i + 1) * FrameParam.DATA_LEN ], )", "str) -> bool: \"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。 Returns: 应该接收为 `True`,不应该接收为 `False`。 \"\"\" return port", "return True if response.reply_state == ReplyState.ACK else False def parse_message(self, binary: str) ->", "self.__app: return message, True elif port 
== self.__phy: return bits_to_string(message), False else: continue", "Topology.BROADCAST_PORT) def build_pool(self, app_data: dict) -> list[Frame]: \"\"\"将消息打包为帧。 Args: app_data: 本机应用层传来的消息数据。 Returns: 打包的帧列表。", "list[Frame]: \"\"\"将消息打包为帧。 Args: app_data: 本机应用层传来的消息数据。 Returns: 打包的帧列表。 \"\"\" message = app_data[\"message\"] frame_num =", "-> tuple[str, bool]: \"\"\"接收来自本机物理层的消息。 Args: timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns: - [0] 接收到的消息。 -", "__str__(self) -> str: \"\"\"打印设备号与端口号。\"\"\" return f\"[Device {self.__device_id}] <Net Layer @{self.__port}>\\n{'-'*30}\" def receive_all(self) ->", "binary: str) -> int: \"\"\"向本机物理层发送消息。 Args: binary: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" # 流量控制。", "[1] 本机应用层发来为 `True`,本机物理层发来为 `False`。 \"\"\" while True: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) if", "utils.coding import * from utils.frame import * from utils.params import * from layer._abstract", "\"\"\" def __init__(self, device_id: str) -> None: \"\"\"初始化主机网络层。 Args: device_id: 该主机的设备号。 \"\"\" self.__device_id", "\"\"\" # 流量控制。 sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary), self.__phy) def should_receive(self, port: str) -> bool:", "tuple[str, bool]: \"\"\"接收来自本机物理层的消息。 Args: timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns: - [0] 接收到的消息。 - [1]", "self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[ i * FrameParam.DATA_LEN : (i + 1) * FrameParam.DATA_LEN ],", "utils.params import * from layer._abstract import AbstractLayer class NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层 <-> 主机网络层", "def __str__(self) -> str: \"\"\"打印设备号与端口号。\"\"\" return f\"[Device {self.__device_id}] <Net Layer @{self.__port}>\\n{'-'*30}\" def receive_all(self)", "port != self.__app: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) return message def receive_from_phy(self, timeout:", "_ = self._receive(bufsize=Network.IN_NE_BUFSIZE) return message def receive_from_phy(self, timeout: int = 
Network.RECV_TIMEOUT) -> tuple[str,", "False else: continue def receive_from_app(self) -> str: \"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。 \"\"\" port =", "\"\"\" self.__device_id = device_id self.__app = f\"1{device_id}300\" self.__port = f\"1{device_id}200\" self.__phy = f\"1{device_id}100\"", "build_ack(self, dst: str) -> Frame: \"\"\"生成 ACK 帧。 Args: dst: ACK 的目的地,即原消息的源。 Returns:", "elif port == self.__phy: return bits_to_string(message), False else: continue def receive_from_app(self) -> str:", "str: \"\"\"打印设备号与端口号。\"\"\" return f\"[Device {self.__device_id}] <Net Layer @{self.__port}>\\n{'-'*30}\" def receive_all(self) -> tuple[str, bool]:", "Frame: \"\"\"生成 ACK 帧。 Args: dst: ACK 的目的地,即原消息的源。 Returns: 生成的 ACK 帧。 \"\"\"", ":], ) frame_pool.append(final_frame) return frame_pool def build_ack(self, dst: str) -> Frame: \"\"\"生成 ACK", "self.__normal_builder = FrameBuilder() self.__normal_builder.build( src=self.__app, reply_state=ReplyState.ACK, ) self.__reply_builder = FrameBuilder() self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL,", "from time import sleep from utils.coding import * from utils.frame import * from", "`True`,接收超时为 `False`。 \"\"\" binary, _, success = self._receive(timeout=timeout) binary = bits_to_string(binary) if success", "self.__app: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) return message def receive_from_phy(self, timeout: int =", "tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns: - [0] 接收到的消息。 - [1] 本机应用层发来为 `True`,本机物理层发来为 `False`。 \"\"\"", "import AbstractLayer class NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层 <-> 主机网络层 <-> 主机物理层的消息收发。 \"\"\" def __init__(self,", "接收成功为 `True`,接收超时为 `False`。 \"\"\" binary, _, success = self._receive(timeout=timeout) binary = bits_to_string(binary) if", "最后一帧是结束帧。 final_frame = self.__normal_builder.build( session_state=SessionState.FIN, data=message[(frame_num - 1) * FrameParam.DATA_LEN :], ) frame_pool.append(final_frame)", "import * from utils.frame 
import * from utils.params import * from layer._abstract import", "return message def receive_from_phy(self, timeout: int = Network.RECV_TIMEOUT) -> tuple[str, bool]: \"\"\"接收来自本机物理层的消息。 Args:", "Returns: ACK 为 `True`,NAK 为 `False`。 \"\"\" response = self.__parser.parse(binary) return True if", "\"\"\"生成 NAK 帧。 Args: dst: NAK 的目的地,即原消息的源。 Returns: 生成的 NAK 帧。 \"\"\" return", "bool: \"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。 Returns: 应该接收为 `True`,不应该接收为 `False`。 \"\"\" return port in (self.__app,", "request_frame = self.__normal_builder.build( session_state=SessionState.REQ_TXT if app_data[\"msgtype\"] == MessageType.TEXT else SessionState.REQ_IMG, dst=app_data[\"dst\"], ) frame_pool", "= \"\" while port != self.__app: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) return message", "-> Frame: \"\"\"生成 NAK 帧。 Args: dst: NAK 的目的地,即原消息的源。 Returns: 生成的 NAK 帧。", "app_data[\"msgtype\"] == MessageType.TEXT else SessionState.REQ_IMG, dst=app_data[\"dst\"], ) frame_pool = [request_frame] # 中间的帧是常规帧。 frame_pool.extend(", "import sleep from utils.coding import * from utils.frame import * from utils.params import", "Args: message: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" return self._send(message, self.__app) def send_to_phy(self, binary: str)", "self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL, data=\"\", ) self.__parser = FrameParser() def __str__(self) -> str: \"\"\"打印设备号与端口号。\"\"\"", ") self.__reply_builder = FrameBuilder() self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL, data=\"\", ) self.__parser = FrameParser() def", "self.__phy) def should_receive(self, port: str) -> bool: \"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。 Returns: 应该接收为 `True`,不应该接收为", "- [0] 接收到的消息。 - [1] 接收成功为 `True`,接收超时为 `False`。 \"\"\" binary, _, success =", "app_data: dict) -> list[Frame]: \"\"\"将消息打包为帧。 Args: app_data: 本机应用层传来的消息数据。 Returns: 打包的帧列表。 \"\"\" message =", "def receive_all(self) -> tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns: 
- [0] 接收到的消息。 - [1] 本机应用层发来为", "_ = self._receive(bufsize=Network.IN_NE_BUFSIZE) if port == self.__app: return message, True elif port ==", "FrameParam.DATA_LEN ], ) for i in range(frame_num - 1) ] ) # 最后一帧是结束帧。", "{self.__device_id}] <Net Layer @{self.__port}>\\n{'-'*30}\" def receive_all(self) -> tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns: - [0]", "[0] 接收到的消息。 - [1] 本机应用层发来为 `True`,本机物理层发来为 `False`。 \"\"\" while True: message, port, _", "接收到的消息。 - [1] 本机应用层发来为 `True`,本机物理层发来为 `False`。 \"\"\" while True: message, port, _ =", "dst=dst) def build_nak(self, dst: str) -> Frame: \"\"\"生成 NAK 帧。 Args: dst: NAK", "self._receive(timeout=timeout) binary = bits_to_string(binary) if success else binary return binary, success def send_to_app(self,", "Frame.calc_num(message) # 第一帧是请求帧。 request_frame = self.__normal_builder.build( session_state=SessionState.REQ_TXT if app_data[\"msgtype\"] == MessageType.TEXT else SessionState.REQ_IMG,", "- [1] 接收成功为 `True`,接收超时为 `False`。 \"\"\" binary, _, success = self._receive(timeout=timeout) binary =", "app_data[\"message\"] frame_num = Frame.calc_num(message) # 第一帧是请求帧。 request_frame = self.__normal_builder.build( session_state=SessionState.REQ_TXT if app_data[\"msgtype\"] ==", ") self.__parser = FrameParser() def __str__(self) -> str: \"\"\"打印设备号与端口号。\"\"\" return f\"[Device {self.__device_id}] <Net", "(self.__app, Topology.BROADCAST_PORT) def build_pool(self, app_data: dict) -> list[Frame]: \"\"\"将消息打包为帧。 Args: app_data: 本机应用层传来的消息数据。 Returns:", "Returns: 打包的帧列表。 \"\"\" message = app_data[\"message\"] frame_num = Frame.calc_num(message) # 第一帧是请求帧。 request_frame =", ") for i in range(frame_num - 1) ] ) # 最后一帧是结束帧。 final_frame =", "return frame_pool def build_ack(self, dst: str) -> Frame: \"\"\"生成 ACK 帧。 Args: dst:", "str) -> bool: \"\"\"解析回复。 Args: binary: 含有回复的 01 字符串。 Returns: ACK 为 `True`,NAK", "NAK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst) def parse_reply(self, binary: str) -> bool: 
\"\"\"解析回复。", "return f\"[Device {self.__device_id}] <Net Layer @{self.__port}>\\n{'-'*30}\" def receive_all(self) -> tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns:", "self._receive(bufsize=Network.IN_NE_BUFSIZE) if port == self.__app: return message, True elif port == self.__phy: return", "data=message[ i * FrameParam.DATA_LEN : (i + 1) * FrameParam.DATA_LEN ], ) for", "[0] 接收到的消息。 - [1] 接收成功为 `True`,接收超时为 `False`。 \"\"\" binary, _, success = self._receive(timeout=timeout)", "\"\"\"向本机应用层发送消息。 Args: message: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" return self._send(message, self.__app) def send_to_phy(self, binary:", "Returns: - [0] 接收到的消息。 - [1] 本机应用层发来为 `True`,本机物理层发来为 `False`。 \"\"\" while True: message,", "* from utils.frame import * from utils.params import * from layer._abstract import AbstractLayer", "01 字符串。 Returns: ACK 为 `True`,NAK 为 `False`。 \"\"\" response = self.__parser.parse(binary) return", "int = Network.RECV_TIMEOUT) -> tuple[str, bool]: \"\"\"接收来自本机物理层的消息。 Args: timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns: -", "self.__reply_builder = FrameBuilder() self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL, data=\"\", ) self.__parser = FrameParser() def __str__(self)", "def receive_from_app(self) -> str: \"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。 \"\"\" port = \"\" while port", "\"\"\"打印设备号与端口号。\"\"\" return f\"[Device {self.__device_id}] <Net Layer @{self.__port}>\\n{'-'*30}\" def receive_all(self) -> tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。", "Returns: 生成的 ACK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def build_nak(self, dst: str) ->", "帧。 Args: dst: ACK 的目的地,即原消息的源。 Returns: 生成的 ACK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst)", "time import sleep from utils.coding import * from utils.frame import * from utils.params", "def receive_from_phy(self, timeout: int = Network.RECV_TIMEOUT) -> tuple[str, bool]: \"\"\"接收来自本机物理层的消息。 Args: timeout: 
可选,接收超时时间,单位为秒,默认为", "Args: binary: 含有回复的 01 字符串。 Returns: ACK 为 `True`,NAK 为 `False`。 \"\"\" response", "dst: str) -> Frame: \"\"\"生成 NAK 帧。 Args: dst: NAK 的目的地,即原消息的源。 Returns: 生成的", "return self._send(message, self.__app) def send_to_phy(self, binary: str) -> int: \"\"\"向本机物理层发送消息。 Args: binary: 要发送的消息。", "# 第一帧是请求帧。 request_frame = self.__normal_builder.build( session_state=SessionState.REQ_TXT if app_data[\"msgtype\"] == MessageType.TEXT else SessionState.REQ_IMG, dst=app_data[\"dst\"],", "should_receive(self, port: str) -> bool: \"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。 Returns: 应该接收为 `True`,不应该接收为 `False`。 \"\"\"", "session_state=SessionState.REQ_TXT if app_data[\"msgtype\"] == MessageType.TEXT else SessionState.REQ_IMG, dst=app_data[\"dst\"], ) frame_pool = [request_frame] #", "\"\"\"初始化主机网络层。 Args: device_id: 该主机的设备号。 \"\"\" self.__device_id = device_id self.__app = f\"1{device_id}300\" self.__port =", "message: str) -> int: \"\"\"向本机应用层发送消息。 Args: message: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" return self._send(message,", "True: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) if port == self.__app: return message, True", "!= self.__app: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) return message def receive_from_phy(self, timeout: int", "\"\"\"生成 ACK 帧。 Args: dst: ACK 的目的地,即原消息的源。 Returns: 生成的 ACK 帧。 \"\"\" return", "= f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder = FrameBuilder() self.__normal_builder.build( src=self.__app, reply_state=ReplyState.ACK, ) self.__reply_builder = FrameBuilder()", "FrameBuilder() self.__normal_builder.build( src=self.__app, reply_state=ReplyState.ACK, ) self.__reply_builder = FrameBuilder() self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL, data=\"\", )", "utils.frame import * from utils.params import * from layer._abstract import AbstractLayer class NetLayer(AbstractLayer):", "timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns: - [0] 接收到的消息。 - 
[1] 接收成功为 `True`,接收超时为 `False`。 \"\"\"", "= f\"1{device_id}300\" self.__port = f\"1{device_id}200\" self.__phy = f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder = FrameBuilder() self.__normal_builder.build(", "-> None: \"\"\"初始化主机网络层。 Args: device_id: 该主机的设备号。 \"\"\" self.__device_id = device_id self.__app = f\"1{device_id}300\"", "send_to_phy(self, binary: str) -> int: \"\"\"向本机物理层发送消息。 Args: binary: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" #", "= Frame.calc_num(message) # 第一帧是请求帧。 request_frame = self.__normal_builder.build( session_state=SessionState.REQ_TXT if app_data[\"msgtype\"] == MessageType.TEXT else", "\"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def build_nak(self, dst: str) -> Frame: \"\"\"生成 NAK 帧。", "def build_pool(self, app_data: dict) -> list[Frame]: \"\"\"将消息打包为帧。 Args: app_data: 本机应用层传来的消息数据。 Returns: 打包的帧列表。 \"\"\"", "from utils.frame import * from utils.params import * from layer._abstract import AbstractLayer class", "binary: str) -> bool: \"\"\"解析回复。 Args: binary: 含有回复的 01 字符串。 Returns: ACK 为", "= self._receive(bufsize=Network.IN_NE_BUFSIZE) if port == self.__app: return message, True elif port == self.__phy:", "port = \"\" while port != self.__app: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) return", "* FrameParam.DATA_LEN :], ) frame_pool.append(final_frame) return frame_pool def build_ack(self, dst: str) -> Frame:", "device_id: 该主机的设备号。 \"\"\" self.__device_id = device_id self.__app = f\"1{device_id}300\" self.__port = f\"1{device_id}200\" self.__phy", "str) -> None: \"\"\"初始化主机网络层。 Args: device_id: 该主机的设备号。 \"\"\" self.__device_id = device_id self.__app =", "self._send(message, self.__app) def send_to_phy(self, binary: str) -> int: \"\"\"向本机物理层发送消息。 Args: binary: 要发送的消息。 Returns:", "sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary), self.__phy) def should_receive(self, port: str) -> bool: \"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。", "port == self.__phy: 
return bits_to_string(message), False else: continue def receive_from_app(self) -> str: \"\"\"接收来自本机应用层的消息。", "接收到的消息。 \"\"\" port = \"\" while port != self.__app: message, port, _ =", "\"\"\"向本机物理层发送消息。 Args: binary: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" # 流量控制。 sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary), self.__phy)", "\"\"\"接收来自本机应用层与本机物理层的消息。 Returns: - [0] 接收到的消息。 - [1] 本机应用层发来为 `True`,本机物理层发来为 `False`。 \"\"\" while True:", "Returns: 总共发送的字节数。 \"\"\" return self._send(message, self.__app) def send_to_phy(self, binary: str) -> int: \"\"\"向本机物理层发送消息。", "str) -> Frame: \"\"\"生成 ACK 帧。 Args: dst: ACK 的目的地,即原消息的源。 Returns: 生成的 ACK", "parse_message(self, binary: str) -> Frame: \"\"\"解析消息。 Args: binary: 含有消息的 01 字符串。 Returns: 收到的消息帧。", "self._receive(bufsize=Network.IN_NE_BUFSIZE) return message def receive_from_phy(self, timeout: int = Network.RECV_TIMEOUT) -> tuple[str, bool]: \"\"\"接收来自本机物理层的消息。", "= self._receive(timeout=timeout) binary = bits_to_string(binary) if success else binary return binary, success def", "for i in range(frame_num - 1) ] ) # 最后一帧是结束帧。 final_frame = self.__normal_builder.build(", "final_frame = self.__normal_builder.build( session_state=SessionState.FIN, data=message[(frame_num - 1) * FrameParam.DATA_LEN :], ) frame_pool.append(final_frame) return", "return port in (self.__app, Topology.BROADCAST_PORT) def build_pool(self, app_data: dict) -> list[Frame]: \"\"\"将消息打包为帧。 Args:", "def build_nak(self, dst: str) -> Frame: \"\"\"生成 NAK 帧。 Args: dst: NAK 的目的地,即原消息的源。", "应该接收为 `True`,不应该接收为 `False`。 \"\"\" return port in (self.__app, Topology.BROADCAST_PORT) def build_pool(self, app_data: dict)", "port: str) -> bool: \"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。 Returns: 应该接收为 `True`,不应该接收为 `False`。 \"\"\" return", "message = app_data[\"message\"] frame_num = Frame.calc_num(message) # 第一帧是请求帧。 request_frame = self.__normal_builder.build( session_state=SessionState.REQ_TXT if", "self.__normal_builder.build( src=self.__app, 
reply_state=ReplyState.ACK, ) self.__reply_builder = FrameBuilder() self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL, data=\"\", ) self.__parser", "self.__app = f\"1{device_id}300\" self.__port = f\"1{device_id}200\" self.__phy = f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder = FrameBuilder()", "dst=app_data[\"dst\"], ) frame_pool = [request_frame] # 中间的帧是常规帧。 frame_pool.extend( [ self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[ i", "@{self.__port}>\\n{'-'*30}\" def receive_all(self) -> tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns: - [0] 接收到的消息。 - [1]", "else SessionState.REQ_IMG, dst=app_data[\"dst\"], ) frame_pool = [request_frame] # 中间的帧是常规帧。 frame_pool.extend( [ self.__normal_builder.build( session_state=SessionState.NORMAL,", "Network.RECV_TIMEOUT) -> tuple[str, bool]: \"\"\"接收来自本机物理层的消息。 Args: timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns: - [0] 接收到的消息。", "dst=dst) def parse_reply(self, binary: str) -> bool: \"\"\"解析回复。 Args: binary: 含有回复的 01 字符串。", "\"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst) def parse_reply(self, binary: str) -> bool: \"\"\"解析回复。 Args: binary:", "sleep from utils.coding import * from utils.frame import * from utils.params import *", "`RECV_TIMEOUT`。 Returns: - [0] 接收到的消息。 - [1] 接收成功为 `True`,接收超时为 `False`。 \"\"\" binary, _,", "True elif port == self.__phy: return bits_to_string(message), False else: continue def receive_from_app(self) ->", "i in range(frame_num - 1) ] ) # 最后一帧是结束帧。 final_frame = self.__normal_builder.build( session_state=SessionState.FIN,", "binary return binary, success def send_to_app(self, message: str) -> int: \"\"\"向本机应用层发送消息。 Args: message:", "打包的帧列表。 \"\"\" message = app_data[\"message\"] frame_num = Frame.calc_num(message) # 第一帧是请求帧。 request_frame = self.__normal_builder.build(", "def send_to_phy(self, binary: str) -> int: \"\"\"向本机物理层发送消息。 Args: binary: 要发送的消息。 Returns: 总共发送的字节数。 
\"\"\"", "port == self.__app: return message, True elif port == self.__phy: return bits_to_string(message), False", "的目的地,即原消息的源。 Returns: 生成的 ACK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def build_nak(self, dst: str)", "`True`,不应该接收为 `False`。 \"\"\" return port in (self.__app, Topology.BROADCAST_PORT) def build_pool(self, app_data: dict) ->", "f\"[Device {self.__device_id}] <Net Layer @{self.__port}>\\n{'-'*30}\" def receive_all(self) -> tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns: -", "* FrameParam.DATA_LEN : (i + 1) * FrameParam.DATA_LEN ], ) for i in", "- [0] 接收到的消息。 - [1] 本机应用层发来为 `True`,本机物理层发来为 `False`。 \"\"\" while True: message, port,", "* from layer._abstract import AbstractLayer class NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层 <-> 主机网络层 <-> 主机物理层的消息收发。", "self.__device_id = device_id self.__app = f\"1{device_id}300\" self.__port = f\"1{device_id}200\" self.__phy = f\"1{device_id}100\" super().__init__(self.__port)", "i * FrameParam.DATA_LEN : (i + 1) * FrameParam.DATA_LEN ], ) for i", "总共发送的字节数。 \"\"\" # 流量控制。 sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary), self.__phy) def should_receive(self, port: str) ->", "为 `False`。 \"\"\" response = self.__parser.parse(binary) return True if response.reply_state == ReplyState.ACK else", "str) -> Frame: \"\"\"解析消息。 Args: binary: 含有消息的 01 字符串。 Returns: 收到的消息帧。 \"\"\" return", "\"\"\" return self._send(message, self.__app) def send_to_phy(self, binary: str) -> int: \"\"\"向本机物理层发送消息。 Args: binary:", "Args: app_data: 本机应用层传来的消息数据。 Returns: 打包的帧列表。 \"\"\" message = app_data[\"message\"] frame_num = Frame.calc_num(message) #", "\"\"\"主机网络层。 实现了主机应用层 <-> 主机网络层 <-> 主机物理层的消息收发。 \"\"\" def __init__(self, device_id: str) -> None:", "parse_reply(self, binary: str) -> bool: \"\"\"解析回复。 Args: binary: 含有回复的 01 字符串。 Returns: ACK", "= self.__parser.parse(binary) return True if response.reply_state == ReplyState.ACK else False def parse_message(self, binary:", 
") frame_pool = [request_frame] # 中间的帧是常规帧。 frame_pool.extend( [ self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[ i *", "ACK 为 `True`,NAK 为 `False`。 \"\"\" response = self.__parser.parse(binary) return True if response.reply_state", "= FrameParser() def __str__(self) -> str: \"\"\"打印设备号与端口号。\"\"\" return f\"[Device {self.__device_id}] <Net Layer @{self.__port}>\\n{'-'*30}\"", "= bits_to_string(binary) if success else binary return binary, success def send_to_app(self, message: str)", "Args: timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns: - [0] 接收到的消息。 - [1] 接收成功为 `True`,接收超时为 `False`。", "= FrameBuilder() self.__normal_builder.build( src=self.__app, reply_state=ReplyState.ACK, ) self.__reply_builder = FrameBuilder() self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL, data=\"\",", "第一帧是请求帧。 request_frame = self.__normal_builder.build( session_state=SessionState.REQ_TXT if app_data[\"msgtype\"] == MessageType.TEXT else SessionState.REQ_IMG, dst=app_data[\"dst\"], )", "frame_pool = [request_frame] # 中间的帧是常规帧。 frame_pool.extend( [ self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[ i * FrameParam.DATA_LEN", "binary: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" # 流量控制。 sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary), self.__phy) def should_receive(self,", "== ReplyState.ACK else False def parse_message(self, binary: str) -> Frame: \"\"\"解析消息。 Args: binary:", "self.__normal_builder.build( session_state=SessionState.FIN, data=message[(frame_num - 1) * FrameParam.DATA_LEN :], ) frame_pool.append(final_frame) return frame_pool def", "if app_data[\"msgtype\"] == MessageType.TEXT else SessionState.REQ_IMG, dst=app_data[\"dst\"], ) frame_pool = [request_frame] # 中间的帧是常规帧。", "-> tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns: - [0] 接收到的消息。 - [1] 本机应用层发来为 `True`,本机物理层发来为 `False`。", "dict) -> list[Frame]: \"\"\"将消息打包为帧。 Args: app_data: 本机应用层传来的消息数据。 Returns: 打包的帧列表。 \"\"\" 
message = app_data[\"message\"]", "SessionState.REQ_IMG, dst=app_data[\"dst\"], ) frame_pool = [request_frame] # 中间的帧是常规帧。 frame_pool.extend( [ self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[", "if response.reply_state == ReplyState.ACK else False def parse_message(self, binary: str) -> Frame: \"\"\"解析消息。", "NAK 帧。 Args: dst: NAK 的目的地,即原消息的源。 Returns: 生成的 NAK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK,", "`True`,本机物理层发来为 `False`。 \"\"\" while True: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) if port ==", "= self.__normal_builder.build( session_state=SessionState.REQ_TXT if app_data[\"msgtype\"] == MessageType.TEXT else SessionState.REQ_IMG, dst=app_data[\"dst\"], ) frame_pool =", "流量控制。 sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary), self.__phy) def should_receive(self, port: str) -> bool: \"\"\"判断本层是否应该接收某帧。 Args:", "帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def build_nak(self, dst: str) -> Frame: \"\"\"生成 NAK", "\"\"\"解析回复。 Args: binary: 含有回复的 01 字符串。 Returns: ACK 为 `True`,NAK 为 `False`。 \"\"\"", "session_state=SessionState.FIN, data=message[(frame_num - 1) * FrameParam.DATA_LEN :], ) frame_pool.append(final_frame) return frame_pool def build_ack(self,", "- 1) * FrameParam.DATA_LEN :], ) frame_pool.append(final_frame) return frame_pool def build_ack(self, dst: str)", "接收到的消息。 - [1] 接收成功为 `True`,接收超时为 `False`。 \"\"\" binary, _, success = self._receive(timeout=timeout) binary", "\"\"\" port = \"\" while port != self.__app: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE)", "message def receive_from_phy(self, timeout: int = Network.RECV_TIMEOUT) -> tuple[str, bool]: \"\"\"接收来自本机物理层的消息。 Args: timeout:", "self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def build_nak(self, dst: str) -> Frame: \"\"\"生成 NAK 帧。 Args: dst:", ") # 最后一帧是结束帧。 final_frame = self.__normal_builder.build( 
session_state=SessionState.FIN, data=message[(frame_num - 1) * FrameParam.DATA_LEN :],", "else: continue def receive_from_app(self) -> str: \"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。 \"\"\" port = \"\"", "要发送的消息。 Returns: 总共发送的字节数。 \"\"\" return self._send(message, self.__app) def send_to_phy(self, binary: str) -> int:", "[ self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[ i * FrameParam.DATA_LEN : (i + 1) * FrameParam.DATA_LEN", "帧。 Args: dst: NAK 的目的地,即原消息的源。 Returns: 生成的 NAK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst)", "self.__port = f\"1{device_id}200\" self.__phy = f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder = FrameBuilder() self.__normal_builder.build( src=self.__app, reply_state=ReplyState.ACK,", "def send_to_app(self, message: str) -> int: \"\"\"向本机应用层发送消息。 Args: message: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\"", "- 1) ] ) # 最后一帧是结束帧。 final_frame = self.__normal_builder.build( session_state=SessionState.FIN, data=message[(frame_num - 1)", "session_state=SessionState.NORMAL, data=\"\", ) self.__parser = FrameParser() def __str__(self) -> str: \"\"\"打印设备号与端口号。\"\"\" return f\"[Device", "data=message[(frame_num - 1) * FrameParam.DATA_LEN :], ) frame_pool.append(final_frame) return frame_pool def build_ack(self, dst:", "`True`,NAK 为 `False`。 \"\"\" response = self.__parser.parse(binary) return True if response.reply_state == ReplyState.ACK", "app_data: 本机应用层传来的消息数据。 Returns: 打包的帧列表。 \"\"\" message = app_data[\"message\"] frame_num = Frame.calc_num(message) # 第一帧是请求帧。", "* FrameParam.DATA_LEN ], ) for i in range(frame_num - 1) ] ) #", "含有回复的 01 字符串。 Returns: ACK 为 `True`,NAK 为 `False`。 \"\"\" response = self.__parser.parse(binary)", "-> int: \"\"\"向本机物理层发送消息。 Args: binary: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" # 流量控制。 sleep(Network.FLOW_INTERVAL) return", "continue def receive_from_app(self) -> str: \"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。 \"\"\" port = \"\" while", "主机物理层的消息收发。 \"\"\" 
def __init__(self, device_id: str) -> None: \"\"\"初始化主机网络层。 Args: device_id: 该主机的设备号。 \"\"\"", "self.__parser = FrameParser() def __str__(self) -> str: \"\"\"打印设备号与端口号。\"\"\" return f\"[Device {self.__device_id}] <Net Layer", ") frame_pool.append(final_frame) return frame_pool def build_ack(self, dst: str) -> Frame: \"\"\"生成 ACK 帧。", "ACK 帧。 Args: dst: ACK 的目的地,即原消息的源。 Returns: 生成的 ACK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK,", "生成的 NAK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst) def parse_reply(self, binary: str) -> bool:", "-> str: \"\"\"打印设备号与端口号。\"\"\" return f\"[Device {self.__device_id}] <Net Layer @{self.__port}>\\n{'-'*30}\" def receive_all(self) -> tuple[str,", "Layer @{self.__port}>\\n{'-'*30}\" def receive_all(self) -> tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns: - [0] 接收到的消息。 -", "-> Frame: \"\"\"解析消息。 Args: binary: 含有消息的 01 字符串。 Returns: 收到的消息帧。 \"\"\" return self.__parser.parse(binary)", "return self._send(string_to_bits(binary), self.__phy) def should_receive(self, port: str) -> bool: \"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。 Returns:", "# 最后一帧是结束帧。 final_frame = self.__normal_builder.build( session_state=SessionState.FIN, data=message[(frame_num - 1) * FrameParam.DATA_LEN :], )", "\"\"\" message = app_data[\"message\"] frame_num = Frame.calc_num(message) # 第一帧是请求帧。 request_frame = self.__normal_builder.build( session_state=SessionState.REQ_TXT", "= self._receive(bufsize=Network.IN_NE_BUFSIZE) return message def receive_from_phy(self, timeout: int = Network.RECV_TIMEOUT) -> tuple[str, bool]:", "self.__phy = f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder = FrameBuilder() self.__normal_builder.build( src=self.__app, reply_state=ReplyState.ACK, ) self.__reply_builder =", "总共发送的字节数。 \"\"\" return self._send(message, self.__app) def send_to_phy(self, binary: str) -> int: \"\"\"向本机物理层发送消息。 Args:", "self.__normal_builder.build( 
session_state=SessionState.REQ_TXT if app_data[\"msgtype\"] == MessageType.TEXT else SessionState.REQ_IMG, dst=app_data[\"dst\"], ) frame_pool = [request_frame]", "_, success = self._receive(timeout=timeout) binary = bits_to_string(binary) if success else binary return binary,", "+ 1) * FrameParam.DATA_LEN ], ) for i in range(frame_num - 1) ]", "中间的帧是常规帧。 frame_pool.extend( [ self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[ i * FrameParam.DATA_LEN : (i + 1)", "return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst) def parse_reply(self, binary: str) -> bool: \"\"\"解析回复。 Args: binary: 含有回复的", "f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder = FrameBuilder() self.__normal_builder.build( src=self.__app, reply_state=ReplyState.ACK, ) self.__reply_builder = FrameBuilder() self.__reply_builder.build(", "message, True elif port == self.__phy: return bits_to_string(message), False else: continue def receive_from_app(self)", "\"\"\" return port in (self.__app, Topology.BROADCAST_PORT) def build_pool(self, app_data: dict) -> list[Frame]: \"\"\"将消息打包为帧。", "send_to_app(self, message: str) -> int: \"\"\"向本机应用层发送消息。 Args: message: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" return", "(i + 1) * FrameParam.DATA_LEN ], ) for i in range(frame_num - 1)", "binary: str) -> Frame: \"\"\"解析消息。 Args: binary: 含有消息的 01 字符串。 Returns: 收到的消息帧。 \"\"\"", "= f\"1{device_id}200\" self.__phy = f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder = FrameBuilder() self.__normal_builder.build( src=self.__app, reply_state=ReplyState.ACK, )", "f\"1{device_id}300\" self.__port = f\"1{device_id}200\" self.__phy = f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder = FrameBuilder() self.__normal_builder.build( src=self.__app,", "reply_state=ReplyState.ACK, ) self.__reply_builder = FrameBuilder() self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL, data=\"\", ) self.__parser = 
FrameParser()", "-> Frame: \"\"\"生成 ACK 帧。 Args: dst: ACK 的目的地,即原消息的源。 Returns: 生成的 ACK 帧。", "response.reply_state == ReplyState.ACK else False def parse_message(self, binary: str) -> Frame: \"\"\"解析消息。 Args:", "1) ] ) # 最后一帧是结束帧。 final_frame = self.__normal_builder.build( session_state=SessionState.FIN, data=message[(frame_num - 1) *", "本机应用层发来为 `True`,本机物理层发来为 `False`。 \"\"\" while True: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) if port", "-> str: \"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。 \"\"\" port = \"\" while port != self.__app:", "`False`。 \"\"\" while True: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) if port == self.__app:", "1) * FrameParam.DATA_LEN :], ) frame_pool.append(final_frame) return frame_pool def build_ack(self, dst: str) ->", "super().__init__(self.__port) self.__normal_builder = FrameBuilder() self.__normal_builder.build( src=self.__app, reply_state=ReplyState.ACK, ) self.__reply_builder = FrameBuilder() self.__reply_builder.build( src=self.__app,", "receive_from_app(self) -> str: \"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。 \"\"\" port = \"\" while port !=", "in range(frame_num - 1) ] ) # 最后一帧是结束帧。 final_frame = self.__normal_builder.build( session_state=SessionState.FIN, data=message[(frame_num", "FrameParam.DATA_LEN : (i + 1) * FrameParam.DATA_LEN ], ) for i in range(frame_num", "= app_data[\"message\"] frame_num = Frame.calc_num(message) # 第一帧是请求帧。 request_frame = self.__normal_builder.build( session_state=SessionState.REQ_TXT if app_data[\"msgtype\"]", "message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) return message def receive_from_phy(self, timeout: int = Network.RECV_TIMEOUT)", "dst: str) -> Frame: \"\"\"生成 ACK 帧。 Args: dst: ACK 的目的地,即原消息的源。 Returns: 生成的", "else binary return binary, success def send_to_app(self, message: str) -> int: \"\"\"向本机应用层发送消息。 Args:", "binary, _, success = self._receive(timeout=timeout) binary = bits_to_string(binary) if success else binary return", "Returns: 
总共发送的字节数。 \"\"\" # 流量控制。 sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary), self.__phy) def should_receive(self, port: str)", "str) -> int: \"\"\"向本机物理层发送消息。 Args: binary: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" # 流量控制。 sleep(Network.FLOW_INTERVAL)", "Returns: 生成的 NAK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst) def parse_reply(self, binary: str) ->", "= self.__normal_builder.build( session_state=SessionState.FIN, data=message[(frame_num - 1) * FrameParam.DATA_LEN :], ) frame_pool.append(final_frame) return frame_pool", "-> bool: \"\"\"解析回复。 Args: binary: 含有回复的 01 字符串。 Returns: ACK 为 `True`,NAK 为", "`False`。 \"\"\" response = self.__parser.parse(binary) return True if response.reply_state == ReplyState.ACK else False", "bits_to_string(message), False else: continue def receive_from_app(self) -> str: \"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。 \"\"\" port", "success else binary return binary, success def send_to_app(self, message: str) -> int: \"\"\"向本机应用层发送消息。", "帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst) def parse_reply(self, binary: str) -> bool: \"\"\"解析回复。 Args:", "src=self.__app, reply_state=ReplyState.ACK, ) self.__reply_builder = FrameBuilder() self.__reply_builder.build( src=self.__app, session_state=SessionState.NORMAL, data=\"\", ) self.__parser =", "f\"1{device_id}200\" self.__phy = f\"1{device_id}100\" super().__init__(self.__port) self.__normal_builder = FrameBuilder() self.__normal_builder.build( src=self.__app, reply_state=ReplyState.ACK, ) self.__reply_builder", "Args: dst: ACK 的目的地,即原消息的源。 Returns: 生成的 ACK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def", "AbstractLayer class NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层 <-> 主机网络层 <-> 主机物理层的消息收发。 \"\"\" def __init__(self, device_id:", "layer._abstract import AbstractLayer class NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层 <-> 主机网络层 <-> 主机物理层的消息收发。 \"\"\" def", "port, _ = 
self._receive(bufsize=Network.IN_NE_BUFSIZE) return message def receive_from_phy(self, timeout: int = Network.RECV_TIMEOUT) ->", "from utils.params import * from layer._abstract import AbstractLayer class NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层 <->", "binary, success def send_to_app(self, message: str) -> int: \"\"\"向本机应用层发送消息。 Args: message: 要发送的消息。 Returns:", "binary = bits_to_string(binary) if success else binary return binary, success def send_to_app(self, message:", "] ) # 最后一帧是结束帧。 final_frame = self.__normal_builder.build( session_state=SessionState.FIN, data=message[(frame_num - 1) * FrameParam.DATA_LEN", "import * from utils.params import * from layer._abstract import AbstractLayer class NetLayer(AbstractLayer): \"\"\"主机网络层。", "int: \"\"\"向本机应用层发送消息。 Args: message: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" return self._send(message, self.__app) def send_to_phy(self,", "return binary, success def send_to_app(self, message: str) -> int: \"\"\"向本机应用层发送消息。 Args: message: 要发送的消息。", "src=self.__app, session_state=SessionState.NORMAL, data=\"\", ) self.__parser = FrameParser() def __str__(self) -> str: \"\"\"打印设备号与端口号。\"\"\" return", "* from utils.params import * from layer._abstract import AbstractLayer class NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层", "Returns: - [0] 接收到的消息。 - [1] 接收成功为 `True`,接收超时为 `False`。 \"\"\" binary, _, success", "-> list[Frame]: \"\"\"将消息打包为帧。 Args: app_data: 本机应用层传来的消息数据。 Returns: 打包的帧列表。 \"\"\" message = app_data[\"message\"] frame_num", "\"\"\" response = self.__parser.parse(binary) return True if response.reply_state == ReplyState.ACK else False def", "frame_pool.extend( [ self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[ i * FrameParam.DATA_LEN : (i + 1) *", "Frame: \"\"\"生成 NAK 帧。 Args: dst: NAK 的目的地,即原消息的源。 Returns: 生成的 NAK 帧。 \"\"\"", "else False def parse_message(self, binary: str) -> Frame: \"\"\"解析消息。 Args: binary: 含有消息的 01", "return bits_to_string(message), False else: continue def 
receive_from_app(self) -> str: \"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。 \"\"\"", "from utils.coding import * from utils.frame import * from utils.params import * from", "实现了主机应用层 <-> 主机网络层 <-> 主机物理层的消息收发。 \"\"\" def __init__(self, device_id: str) -> None: \"\"\"初始化主机网络层。", "class NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层 <-> 主机网络层 <-> 主机物理层的消息收发。 \"\"\" def __init__(self, device_id: str)", "[request_frame] # 中间的帧是常规帧。 frame_pool.extend( [ self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[ i * FrameParam.DATA_LEN : (i", "__init__(self, device_id: str) -> None: \"\"\"初始化主机网络层。 Args: device_id: 该主机的设备号。 \"\"\" self.__device_id = device_id", "data=\"\", ) self.__parser = FrameParser() def __str__(self) -> str: \"\"\"打印设备号与端口号。\"\"\" return f\"[Device {self.__device_id}]", "MessageType.TEXT else SessionState.REQ_IMG, dst=app_data[\"dst\"], ) frame_pool = [request_frame] # 中间的帧是常规帧。 frame_pool.extend( [ self.__normal_builder.build(", "ACK 的目的地,即原消息的源。 Returns: 生成的 ACK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def build_nak(self, dst:", "frame_pool def build_ack(self, dst: str) -> Frame: \"\"\"生成 ACK 帧。 Args: dst: ACK", "\"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。 Returns: 应该接收为 `True`,不应该接收为 `False`。 \"\"\" return port in (self.__app, Topology.BROADCAST_PORT)", "\"\"\" while True: message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) if port == self.__app: return", "binary: 含有回复的 01 字符串。 Returns: ACK 为 `True`,NAK 为 `False`。 \"\"\" response =", "主机网络层 <-> 主机物理层的消息收发。 \"\"\" def __init__(self, device_id: str) -> None: \"\"\"初始化主机网络层。 Args: device_id:", "Args: device_id: 该主机的设备号。 \"\"\" self.__device_id = device_id self.__app = f\"1{device_id}300\" self.__port = f\"1{device_id}200\"", "`False`。 \"\"\" binary, _, success = self._receive(timeout=timeout) binary = bits_to_string(binary) if success else", "self._send(string_to_bits(binary), self.__phy) def should_receive(self, port: str) -> bool: 
\"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。 Returns: 应该接收为", "self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst) def parse_reply(self, binary: str) -> bool: \"\"\"解析回复。 Args: binary: 含有回复的 01", "可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns: - [0] 接收到的消息。 - [1] 接收成功为 `True`,接收超时为 `False`。 \"\"\" binary,", "NAK 的目的地,即原消息的源。 Returns: 生成的 NAK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst) def parse_reply(self, binary:", "str: \"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。 \"\"\" port = \"\" while port != self.__app: message,", "<-> 主机物理层的消息收发。 \"\"\" def __init__(self, device_id: str) -> None: \"\"\"初始化主机网络层。 Args: device_id: 该主机的设备号。", "self.__phy: return bits_to_string(message), False else: continue def receive_from_app(self) -> str: \"\"\"接收来自本机应用层的消息。 Returns: 接收到的消息。", "该主机的设备号。 \"\"\" self.__device_id = device_id self.__app = f\"1{device_id}300\" self.__port = f\"1{device_id}200\" self.__phy =", "NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层 <-> 主机网络层 <-> 主机物理层的消息收发。 \"\"\" def __init__(self, device_id: str) ->", "= [request_frame] # 中间的帧是常规帧。 frame_pool.extend( [ self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[ i * FrameParam.DATA_LEN :", "== MessageType.TEXT else SessionState.REQ_IMG, dst=app_data[\"dst\"], ) frame_pool = [request_frame] # 中间的帧是常规帧。 frame_pool.extend( [", "def should_receive(self, port: str) -> bool: \"\"\"判断本层是否应该接收某帧。 Args: 发来的帧的目的端口号。 Returns: 应该接收为 `True`,不应该接收为 `False`。", "def __init__(self, device_id: str) -> None: \"\"\"初始化主机网络层。 Args: device_id: 该主机的设备号。 \"\"\" self.__device_id =", "<Net Layer @{self.__port}>\\n{'-'*30}\" def receive_all(self) -> tuple[str, bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns: - [0] 接收到的消息。", "message, port, _ = self._receive(bufsize=Network.IN_NE_BUFSIZE) if port == self.__app: return message, True elif", "str) -> int: \"\"\"向本机应用层发送消息。 Args: message: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" return self._send(message, self.__app)", "[1] 接收成功为 `True`,接收超时为 
`False`。 \"\"\" binary, _, success = self._receive(timeout=timeout) binary = bits_to_string(binary)", "Returns: 应该接收为 `True`,不应该接收为 `False`。 \"\"\" return port in (self.__app, Topology.BROADCAST_PORT) def build_pool(self, app_data:", "bool]: \"\"\"接收来自本机物理层的消息。 Args: timeout: 可选,接收超时时间,单位为秒,默认为 `RECV_TIMEOUT`。 Returns: - [0] 接收到的消息。 - [1] 接收成功为", "response = self.__parser.parse(binary) return True if response.reply_state == ReplyState.ACK else False def parse_message(self,", "# 中间的帧是常规帧。 frame_pool.extend( [ self.__normal_builder.build( session_state=SessionState.NORMAL, data=message[ i * FrameParam.DATA_LEN : (i +", "def parse_reply(self, binary: str) -> bool: \"\"\"解析回复。 Args: binary: 含有回复的 01 字符串。 Returns:", "self.__parser.parse(binary) return True if response.reply_state == ReplyState.ACK else False def parse_message(self, binary: str)", "<-> 主机网络层 <-> 主机物理层的消息收发。 \"\"\" def __init__(self, device_id: str) -> None: \"\"\"初始化主机网络层。 Args:", "range(frame_num - 1) ] ) # 最后一帧是结束帧。 final_frame = self.__normal_builder.build( session_state=SessionState.FIN, data=message[(frame_num -", "port in (self.__app, Topology.BROADCAST_PORT) def build_pool(self, app_data: dict) -> list[Frame]: \"\"\"将消息打包为帧。 Args: app_data:", "return self.__reply_builder.build(reply_state=ReplyState.ACK, dst=dst) def build_nak(self, dst: str) -> Frame: \"\"\"生成 NAK 帧。 Args:", "bool]: \"\"\"接收来自本机应用层与本机物理层的消息。 Returns: - [0] 接收到的消息。 - [1] 本机应用层发来为 `True`,本机物理层发来为 `False`。 \"\"\" while", "import * from layer._abstract import AbstractLayer class NetLayer(AbstractLayer): \"\"\"主机网络层。 实现了主机应用层 <-> 主机网络层 <->", "要发送的消息。 Returns: 总共发送的字节数。 \"\"\" # 流量控制。 sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary), self.__phy) def should_receive(self, port:", "in (self.__app, Topology.BROADCAST_PORT) def build_pool(self, app_data: dict) -> list[Frame]: \"\"\"将消息打包为帧。 Args: app_data: 本机应用层传来的消息数据。", "int: \"\"\"向本机物理层发送消息。 Args: binary: 要发送的消息。 Returns: 总共发送的字节数。 \"\"\" # 流量控制。 
sleep(Network.FLOW_INTERVAL) return self._send(string_to_bits(binary),", "的目的地,即原消息的源。 Returns: 生成的 NAK 帧。 \"\"\" return self.__reply_builder.build(reply_state=ReplyState.NAK, dst=dst) def parse_reply(self, binary: str)" ]
[ "== \"yr\" assert_allclose(table[\"age\"], [100, 1000]) assert table[\"P0\"].unit == \"s\" assert_allclose(table[\"P0\"], [0.214478, 0.246349], atol=1e-5)", "table[\"age\"].unit == \"yr\" assert_allclose(d[\"age\"], 548813.50392732478) assert table[\"n_ISM\"].unit == \"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0) assert table[\"spiralarm\"].unit", "table[\"VGLAT\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLAT\"], -0.209514, atol=1e-5) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"],", "-0.37640823601179485) table = make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"], 0) table = make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"], 0)", "add_pulsar_parameters(table, random_state=0) assert len(table) == 2 assert len(table.colnames) == 10 assert table[\"age\"].unit ==", "add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere, ) def test_make_catalog_random_positions_cube(): table = make_catalog_random_positions_cube(random_state=0) d =", "== 3 assert table[\"x\"].unit == \"pc\" assert_allclose(d[\"x\"], 0.0976270078546495) assert table[\"y\"].unit == \"pc\" assert_allclose(d[\"y\"],", "add_pwn_parameters(table) table = add_observed_parameters(table) d = table[0] # Note: the individual functions are", "d = table[0] assert len(table) == 100 assert len(table.colnames) == 3 assert table[\"x\"].unit", "random_state=0) # To compute PWN parameters we need PSR and SNR parameters first", "assert_allclose, assert_equal import astropy.units as u from astropy.table import Table from gammapy.astro.population import", "add_observed_parameters(table) d = table[0] assert len(table) == 10 assert len(table.colnames) == 20 assert", "assert len(table.colnames) == 10 assert table[\"age\"].unit == \"yr\" assert_allclose(table[\"age\"], [100, 1000]) assert table[\"P0\"].unit", 
"test_make_catalog_random_positions_sphere(): table = make_catalog_random_positions_sphere(random_state=0) d = table[0] assert len(table) == 100 assert len(table.colnames)", "assert table[\"VGLAT\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLAT\"], -0.209514, atol=1e-5) assert table[\"RA\"].unit == \"deg\"", "table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) d = table[0]", "from astropy.table import Table from gammapy.astro.population import ( add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic,", "so that we make sure we notice changes. assert len(table) == 10 assert", "atol=1e-5) assert table[\"x\"].unit == \"kpc\" assert_allclose(d[\"x\"], -5.941061, atol=1e-5) assert table[\"y\"].unit == \"kpc\" assert_allclose(d[\"y\"],", "= add_pwn_parameters(table) table = add_observed_parameters(table) d = table[0] # Note: the individual functions", "atol=1e-5) assert table[\"CharAge\"].unit == \"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24], atol=1e-5) assert table[\"Tau0\"].unit == \"yr\"", "atol=1e-5) assert table[\"P1_birth\"].unit == \"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16], atol=1e-5) assert table[\"CharAge\"].unit == \"yr\"", "[0.214478, 0.246349], atol=1e-5) assert table[\"P1\"].unit == \"\" assert_allclose(table[\"P1\"], [6.310423e-13, 4.198294e-16], atol=1e-5) assert table[\"P0_birth\"].unit", "add_pwn_parameters(table) d = table[0] assert len(table) == 10 assert len(table.colnames) == 27 assert", "== \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) def test_add_observed_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) table =", "assert_allclose(d[\"VGLAT\"], -0.209514, atol=1e-5) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5) assert table[\"DEC\"].unit ==", "* u.yr table[\"n_ISM\"] = 
u.Quantity(1, \"cm-3\") table = add_snr_parameters(table) assert len(table) == 2", "-5.856461, atol=1e-5) assert table[\"y_birth\"].unit == \"kpc\" assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5) assert table[\"z_birth\"].unit == \"kpc\"", "table[\"GLON\"].unit == \"deg\" assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5) assert table[\"GLAT\"].unit == \"deg\" assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5)", "the simulation functions in chain works table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_snr_parameters(table)", "= add_snr_parameters(table) assert len(table) == 2 assert table.colnames == [\"age\", \"n_ISM\", \"E_SN\", \"r_out\",", "assert table[\"x_birth\"].unit == \"kpc\" assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5) assert table[\"y_birth\"].unit == \"kpc\" assert_allclose(d[\"y_birth\"], 3.017292,", "assert_allclose(table[\"P1\"], [6.310423e-13, 4.198294e-16], atol=1e-5) assert table[\"P0_birth\"].unit == \"s\" assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336], atol=1e-5) assert", "assert_allclose(table[\"E_SN\"], 1e51) assert table[\"r_out\"].unit == \"pc\" assert_allclose(table[\"r_out\"], [1, 3.80730787743]) assert table[\"r_in\"].unit == \"pc\"", "PSR and SNR parameters first table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table", "assert_equal(table[\"z\"], 0) def test_make_catalog_random_positions_sphere(): table = make_catalog_random_positions_sphere(random_state=0) d = table[0] assert len(table) ==", "-27.156565, atol=1e-5) assert table[\"GLAT\"].unit == \"deg\" assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5) assert table[\"VGLON\"].unit == \"deg", "assert table[\"z_birth\"].unit == \"kpc\" assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5) assert table[\"x\"].unit == \"kpc\" assert_allclose(d[\"x\"], -5.941061,", "changes. 
assert len(table) == 10 assert len(table.colnames) == 34 assert table[\"r_out_PWN\"].unit == \"pc\"", "Table from gammapy.astro.population import ( add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere, )", "\"G\" assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11], rtol=1e-5) def test_add_pwn_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) # To", "assert table.colnames == [\"age\", \"n_ISM\", \"E_SN\", \"r_out\", \"r_in\", \"L_SNR\"] assert table[\"E_SN\"].unit == \"erg\"", "assert table[\"x\"].unit == \"pc\" assert_allclose(d[\"x\"], 0.0976270078546495) assert table[\"y\"].unit == \"pc\" assert_allclose(d[\"y\"], 0.3556330735924602) assert", "assert table[\"v_abs\"].unit == \"km/s\" assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5) def test_add_snr_parameters(): table = Table() table[\"age\"]", "table = Table() table[\"age\"] = [100, 1000] * u.yr table = add_pulsar_parameters(table, random_state=0)", "table = add_observed_parameters(table) d = table[0] # Note: the individual functions are tested", "just run them in a chain and do very basic asserts # on", "== \"erg / s\" assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33], rtol=1e-5) assert table[\"L0_PSR\"].unit == \"erg /", "add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) d = table[0] assert len(table)", "d = table[0] assert len(table) == 10 assert len(table.colnames) == 20 assert table[\"distance\"].unit", "assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5) assert table[\"y_birth\"].unit == \"kpc\" assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5) assert table[\"z_birth\"].unit ==", "/ Myr\" assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5) assert table[\"VGLAT\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLAT\"], -0.209514,", "add_pulsar_parameters, 
add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere, ) def test_make_catalog_random_positions_cube(): table = make_catalog_random_positions_cube(random_state=0) d", "assert len(table) == 2 assert len(table.colnames) == 10 assert table[\"age\"].unit == \"yr\" assert_allclose(table[\"age\"],", "table[\"VGLON\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5) assert table[\"VGLAT\"].unit == \"deg /", "def test_make_catalog_random_positions_sphere(): table = make_catalog_random_positions_sphere(random_state=0) d = table[0] assert len(table) == 100 assert", "[0.9086, 3.45931993743]) assert table[\"L_SNR\"].unit == \"1 / s\" assert_allclose(table[\"L_SNR\"], [0, 1.0768e33]) def test_add_pulsar_parameters():", "s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33], rtol=1e-5) assert table[\"B_PSR\"].unit == \"G\" assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11], rtol=1e-5)", "3.081642, atol=1e-5) assert table[\"z\"].unit == \"kpc\" assert_allclose(d[\"z\"], 0.023161, atol=1e-5) assert table[\"vx\"].unit == \"km/s\"", "do very basic asserts # on the output so that we make sure", "assert table[\"L0_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33], rtol=1e-5) assert table[\"B_PSR\"].unit ==", "= table[0] assert len(table) == 100 assert len(table.colnames) == 3 assert table[\"x\"].unit ==", "== \"deg\" assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5) def test_chain_all(): # Test that running the simulation", "\"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0) assert table[\"spiralarm\"].unit is None assert d[\"spiralarm\"] == \"Crux Scutum\" assert", "= table[0] assert len(table) == 10 assert len(table.colnames) == 13 assert table[\"age\"].unit ==", "\"deg / Myr\" assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5) assert table[\"VGLAT\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLAT\"],", "4.199198e-16], 
atol=1e-5) assert table[\"CharAge\"].unit == \"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24], atol=1e-5) assert table[\"Tau0\"].unit ==", "assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16], atol=1e-5) assert table[\"CharAge\"].unit == \"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24], atol=1e-5) assert", "a chain and do very basic asserts # on the output so that", "assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5) assert table[\"VGLAT\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLAT\"], -0.209514, atol=1e-5) assert", "chain works table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0)", "len(table) == 100 assert len(table.colnames) == 3 assert table[\"lon\"].unit == \"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128)", "the output so that we make sure we notice changes. assert len(table) ==", "def test_add_observed_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_observed_parameters(table) d = table[0] assert", "SNR parameters first table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table)", "assert_allclose(d[\"age\"], 548813.50392732478) assert table[\"n_ISM\"].unit == \"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0) assert table[\"spiralarm\"].unit is None assert", "= add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) table = add_observed_parameters(table) d", "0.3556330735924602) assert table[\"z\"].unit == \"pc\" assert_allclose(d[\"z\"], -0.37640823601179485) table = make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"], 0)", "table[\"P0\"].unit == \"s\" assert_allclose(table[\"P0\"], [0.214478, 0.246349], atol=1e-5) assert table[\"P1\"].unit == \"\" assert_allclose(table[\"P1\"], [6.310423e-13,", 
"assert len(table.colnames) == 20 assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5) assert table[\"GLON\"].unit", "= make_base_catalog_galactic(n_sources=10, random_state=0) d = table[0] assert len(table) == 10 assert len(table.colnames) ==", "notice changes. assert len(table) == 10 assert len(table.colnames) == 34 assert table[\"r_out_PWN\"].unit ==", "from numpy.testing import assert_allclose, assert_equal import astropy.units as u from astropy.table import Table", "assert table[\"r_out\"].unit == \"pc\" assert_allclose(table[\"r_out\"], [1, 3.80730787743]) assert table[\"r_in\"].unit == \"pc\" assert_allclose(table[\"r_in\"], [0.9086,", "100 assert len(table.colnames) == 3 assert table[\"lon\"].unit == \"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128) assert table[\"lat\"].unit", "atol=1e-5) assert table[\"z_birth\"].unit == \"kpc\" assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5) assert table[\"x\"].unit == \"kpc\" assert_allclose(d[\"x\"],", "\"E_SN\", \"r_out\", \"r_in\", \"L_SNR\"] assert table[\"E_SN\"].unit == \"erg\" assert_allclose(table[\"E_SN\"], 1e51) assert table[\"r_out\"].unit ==", "= make_catalog_random_positions_cube(random_state=0) d = table[0] assert len(table) == 100 assert len(table.colnames) == 3", "parameters first table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) d", "assert table[\"z\"].unit == \"kpc\" assert_allclose(d[\"z\"], 0.023161, atol=1e-5) assert table[\"vx\"].unit == \"km/s\" assert_allclose(d[\"vx\"], -150.727104,", "( add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere, ) def test_make_catalog_random_positions_cube(): table =", "== \"s\" assert_allclose(table[\"P0\"], [0.214478, 0.246349], atol=1e-5) assert table[\"P1\"].unit == \"\" assert_allclose(table[\"P1\"], 
[6.310423e-13, 4.198294e-16],", "3 assert table[\"lon\"].unit == \"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128) assert table[\"lat\"].unit == \"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267)", "d = table[0] assert len(table) == 10 assert len(table.colnames) == 13 assert table[\"age\"].unit", "assert table[\"lat\"].unit == \"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267) assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606) def", "0.368166, atol=1e-5) assert table[\"VGLAT\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLAT\"], -0.209514, atol=1e-5) assert table[\"RA\"].unit", "chain and do very basic asserts # on the output so that we", "table[\"y\"].unit == \"pc\" assert_allclose(d[\"y\"], 0.3556330735924602) assert table[\"z\"].unit == \"pc\" assert_allclose(d[\"z\"], -0.37640823601179485) table =", "s\" assert_allclose(table[\"L_SNR\"], [0, 1.0768e33]) def test_add_pulsar_parameters(): table = Table() table[\"age\"] = [100, 1000]", "table[\"age\"] = [100, 1000] * u.yr table[\"n_ISM\"] = u.Quantity(1, \"cm-3\") table = add_snr_parameters(table)", "len(table.colnames) == 3 assert table[\"lon\"].unit == \"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128) assert table[\"lat\"].unit == \"rad\"", "assert table[\"E_SN\"].unit == \"erg\" assert_allclose(table[\"E_SN\"], 1e51) assert table[\"r_out\"].unit == \"pc\" assert_allclose(table[\"r_out\"], [1, 3.80730787743])", "we notice changes. 
assert len(table) == 10 assert len(table.colnames) == 34 assert table[\"r_out_PWN\"].unit", "\"yr\" assert_allclose(table[\"age\"], [100, 1000]) assert table[\"P0\"].unit == \"s\" assert_allclose(table[\"P0\"], [0.214478, 0.246349], atol=1e-5) assert", "== 3 assert table[\"lon\"].unit == \"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128) assert table[\"lat\"].unit == \"rad\" assert_allclose(d[\"lat\"],", "== \"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5) assert table[\"DEC\"].unit == \"deg\" assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5) def", "assert_allclose(d[\"RA\"], 244.347149, atol=1e-5) assert table[\"DEC\"].unit == \"deg\" assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5) def test_chain_all(): #", "assert_allclose(table[\"L_SNR\"], [0, 1.0768e33]) def test_add_pulsar_parameters(): table = Table() table[\"age\"] = [100, 1000] *", "3.45931993743]) assert table[\"L_SNR\"].unit == \"1 / s\" assert_allclose(table[\"L_SNR\"], [0, 1.0768e33]) def test_add_pulsar_parameters(): table", "table[\"vy\"].unit == \"km/s\" assert_allclose(d[\"vy\"], 114.648494, atol=1e-5) assert table[\"vz\"].unit == \"km/s\" assert_allclose(d[\"vz\"], -46.193814, atol=1e-5)", "add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) table = add_observed_parameters(table) d = table[0] # Note:", "== 27 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) def test_add_observed_parameters(): table =", "them in a chain and do very basic asserts # on the output", "# Licensed under a 3-clause BSD style license - see LICENSE.rst from numpy.testing", "== 13 assert table[\"age\"].unit == \"yr\" assert_allclose(d[\"age\"], 548813.50392732478) assert table[\"n_ISM\"].unit == \"cm-3\" assert_allclose(d[\"n_ISM\"],", "To compute PWN parameters we need PSR and SNR parameters first table =", "astropy.units as u from astropy.table import Table from gammapy.astro.population import ( add_observed_parameters, 
add_pulsar_parameters,", "table = make_base_catalog_galactic(n_sources=10, random_state=0) # To compute PWN parameters we need PSR and", "make_base_catalog_galactic(n_sources=10, random_state=0) table = add_observed_parameters(table) d = table[0] assert len(table) == 10 assert", "table = add_pulsar_parameters(table, random_state=0) assert len(table) == 2 assert len(table.colnames) == 10 assert", "= make_catalog_random_positions_sphere(random_state=0) d = table[0] assert len(table) == 100 assert len(table.colnames) == 3", "atol=1e-5) assert table[\"P0_birth\"].unit == \"s\" assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336], atol=1e-5) assert table[\"P1_birth\"].unit == \"\"", "make_catalog_random_positions_cube(random_state=0) d = table[0] assert len(table) == 100 assert len(table.colnames) == 3 assert", "make_catalog_random_positions_sphere(random_state=0) d = table[0] assert len(table) == 100 assert len(table.colnames) == 3 assert", "assert table[\"GLAT\"].unit == \"deg\" assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5) assert table[\"VGLON\"].unit == \"deg / Myr\"", "= add_observed_parameters(table) d = table[0] assert len(table) == 10 assert len(table.colnames) == 20", "20 assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5) assert table[\"GLON\"].unit == \"deg\" assert_allclose(d[\"GLON\"],", "table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5) assert table[\"DEC\"].unit == \"deg\" assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5)", "atol=1e-5) assert table[\"Tau0\"].unit == \"yr\" assert_allclose(table[\"Tau0\"], [5.131385e03, 9.294538e06], atol=1e-5) assert table[\"L_PSR\"].unit == \"erg", "assert table[\"vy\"].unit == \"km/s\" assert_allclose(d[\"vy\"], 114.648494, atol=1e-5) assert table[\"vz\"].unit == \"km/s\" assert_allclose(d[\"vz\"], -46.193814,", "rtol=1e-5) assert table[\"B_PSR\"].unit == \"G\" assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11], rtol=1e-5) 
def test_add_pwn_parameters(): table =", "d = table[0] assert len(table) == 100 assert len(table.colnames) == 3 assert table[\"lon\"].unit", "\"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16], atol=1e-5) assert table[\"CharAge\"].unit == \"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24], atol=1e-5)", "== 10 assert table[\"age\"].unit == \"yr\" assert_allclose(table[\"age\"], [100, 1000]) assert table[\"P0\"].unit == \"s\"", "astropy.table import Table from gammapy.astro.population import ( add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube,", "running the simulation functions in chain works table = make_base_catalog_galactic(n_sources=10, random_state=0) table =", "len(table) == 2 assert table.colnames == [\"age\", \"n_ISM\", \"E_SN\", \"r_out\", \"r_in\", \"L_SNR\"] assert", "1000]) assert table[\"P0\"].unit == \"s\" assert_allclose(table[\"P0\"], [0.214478, 0.246349], atol=1e-5) assert table[\"P1\"].unit == \"\"", "1e51) assert table[\"r_out\"].unit == \"pc\" assert_allclose(table[\"r_out\"], [1, 3.80730787743]) assert table[\"r_in\"].unit == \"pc\" assert_allclose(table[\"r_in\"],", "len(table.colnames) == 10 assert table[\"age\"].unit == \"yr\" assert_allclose(table[\"age\"], [100, 1000]) assert table[\"P0\"].unit ==", "assert table[\"DEC\"].unit == \"deg\" assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5) def test_chain_all(): # Test that running", "random_state=0) table = add_observed_parameters(table) d = table[0] assert len(table) == 10 assert len(table.colnames)", "= [100, 1000] * u.yr table = add_pulsar_parameters(table, random_state=0) assert len(table) == 2", "-5.941061, atol=1e-5) assert table[\"y\"].unit == \"kpc\" assert_allclose(d[\"y\"], 3.081642, atol=1e-5) assert table[\"z\"].unit == \"kpc\"", "assert table[\"y\"].unit == \"kpc\" assert_allclose(d[\"y\"], 3.081642, atol=1e-5) assert table[\"z\"].unit == 
\"kpc\" assert_allclose(d[\"z\"], 0.023161,", "random_state=0) assert_equal(table[\"z\"], 0) table = make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"], 0) def test_make_catalog_random_positions_sphere():", "assert_allclose(d[\"vz\"], -46.193814, atol=1e-5) assert table[\"v_abs\"].unit == \"km/s\" assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5) def test_add_snr_parameters(): table", "parameters we need PSR and SNR parameters first table = add_snr_parameters(table) table =", "assert_allclose(d[\"lat\"], 0.36359133530192267) assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606) def test_make_base_catalog_galactic(): table = make_base_catalog_galactic(n_sources=10,", "need PSR and SNR parameters first table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0)", "2 assert len(table.colnames) == 10 assert table[\"age\"].unit == \"yr\" assert_allclose(table[\"age\"], [100, 1000]) assert", "table = add_pwn_parameters(table) table = add_observed_parameters(table) d = table[0] # Note: the individual", "assert table[\"x\"].unit == \"kpc\" assert_allclose(d[\"x\"], -5.941061, atol=1e-5) assert table[\"y\"].unit == \"kpc\" assert_allclose(d[\"y\"], 3.081642,", "assert_allclose(table[\"r_out\"], [1, 3.80730787743]) assert table[\"r_in\"].unit == \"pc\" assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743]) assert table[\"L_SNR\"].unit ==", "\"pc\" assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5) assert table[\"GLON\"].unit == \"deg\" assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5) assert table[\"GLAT\"].unit", "\"pc\" assert_allclose(d[\"y\"], 0.3556330735924602) assert table[\"z\"].unit == \"pc\" assert_allclose(d[\"z\"], -0.37640823601179485) table = make_catalog_random_positions_cube(dimension=2, random_state=0)", "test_add_pulsar_parameters(): table = Table() table[\"age\"] = [100, 1000] * u.yr table = 
add_pulsar_parameters(table,", "548813.50392732478) assert table[\"n_ISM\"].unit == \"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0) assert table[\"spiralarm\"].unit is None assert d[\"spiralarm\"]", "len(table) == 10 assert len(table.colnames) == 13 assert table[\"age\"].unit == \"yr\" assert_allclose(d[\"age\"], 548813.50392732478)", "import ( add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere, ) def test_make_catalog_random_positions_cube(): table", "27 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) def test_add_observed_parameters(): table = make_base_catalog_galactic(n_sources=10,", "add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere, ) def test_make_catalog_random_positions_cube(): table = make_catalog_random_positions_cube(random_state=0)", "10 assert len(table.colnames) == 13 assert table[\"age\"].unit == \"yr\" assert_allclose(d[\"age\"], 548813.50392732478) assert table[\"n_ISM\"].unit", "/ Myr\" assert_allclose(d[\"VGLAT\"], -0.209514, atol=1e-5) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5) assert", "= make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"], 0) table = make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"], 0)", "test_add_pwn_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) # To compute PWN parameters we need PSR", "simulation functions in chain works table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_snr_parameters(table) table", "1.638930e-24], atol=1e-5) assert table[\"Tau0\"].unit == \"yr\" assert_allclose(table[\"Tau0\"], 
[5.131385e03, 9.294538e06], atol=1e-5) assert table[\"L_PSR\"].unit ==", "gammapy.astro.population import ( add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere, ) def test_make_catalog_random_positions_cube():", "import astropy.units as u from astropy.table import Table from gammapy.astro.population import ( add_observed_parameters,", "244.347149, atol=1e-5) assert table[\"DEC\"].unit == \"deg\" assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5) def test_chain_all(): # Test", "assert len(table) == 10 assert len(table.colnames) == 20 assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"],", "table[\"P1_birth\"].unit == \"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16], atol=1e-5) assert table[\"CharAge\"].unit == \"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21,", "Here we just run them in a chain and do very basic asserts", "\"deg\" assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5) def test_chain_all(): # Test that running the simulation functions", "assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149,", ") def test_make_catalog_random_positions_cube(): table = make_catalog_random_positions_cube(random_state=0) d = table[0] assert len(table) == 100", "9.294538e06], atol=1e-5) assert table[\"L_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33], rtol=1e-5) assert", "def test_add_pulsar_parameters(): table = Table() table[\"age\"] = [100, 1000] * u.yr table =", "\"deg\" assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5) assert table[\"VGLON\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5)", "= u.Quantity(1, \"cm-3\") table = add_snr_parameters(table) assert len(table) == 2 assert table.colnames ==", 
"table[\"vz\"].unit == \"km/s\" assert_allclose(d[\"vz\"], -46.193814, atol=1e-5) assert table[\"v_abs\"].unit == \"km/s\" assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5)", "make_base_catalog_galactic(n_sources=10, random_state=0) table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) table", "run them in a chain and do very basic asserts # on the", "assert table[\"Tau0\"].unit == \"yr\" assert_allclose(table[\"Tau0\"], [5.131385e03, 9.294538e06], atol=1e-5) assert table[\"L_PSR\"].unit == \"erg /", "[1.194420e13, 3.254597e11], rtol=1e-5) def test_add_pwn_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) # To compute PWN", "== \"kpc\" assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5) assert table[\"y_birth\"].unit == \"kpc\" assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5) assert", "assert_allclose(table[\"age\"], [100, 1000]) assert table[\"P0\"].unit == \"s\" assert_allclose(table[\"P0\"], [0.214478, 0.246349], atol=1e-5) assert table[\"P1\"].unit", "1.0) assert table[\"spiralarm\"].unit is None assert d[\"spiralarm\"] == \"Crux Scutum\" assert table[\"x_birth\"].unit ==", "table[\"vx\"].unit == \"km/s\" assert_allclose(d[\"vx\"], -150.727104, atol=1e-5) assert table[\"vy\"].unit == \"km/s\" assert_allclose(d[\"vy\"], 114.648494, atol=1e-5)", "\"kpc\" assert_allclose(d[\"x\"], -5.941061, atol=1e-5) assert table[\"y\"].unit == \"kpc\" assert_allclose(d[\"y\"], 3.081642, atol=1e-5) assert table[\"z\"].unit", "= add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) table = add_observed_parameters(table) d = table[0] #", "None assert d[\"spiralarm\"] == \"Crux Scutum\" assert table[\"x_birth\"].unit == \"kpc\" assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5)", "== 10 assert len(table.colnames) == 13 assert table[\"age\"].unit == \"yr\" assert_allclose(d[\"age\"], 548813.50392732478) assert", "from gammapy.astro.population import ( 
add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere, ) def", "atol=1e-5) assert table[\"y\"].unit == \"kpc\" assert_allclose(d[\"y\"], 3.081642, atol=1e-5) assert table[\"z\"].unit == \"kpc\" assert_allclose(d[\"z\"],", "table = add_pwn_parameters(table) d = table[0] assert len(table) == 10 assert len(table.colnames) ==", "\"Crux Scutum\" assert table[\"x_birth\"].unit == \"kpc\" assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5) assert table[\"y_birth\"].unit == \"kpc\"", "table[\"r_in\"].unit == \"pc\" assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743]) assert table[\"L_SNR\"].unit == \"1 / s\" assert_allclose(table[\"L_SNR\"],", "table[0] assert len(table) == 10 assert len(table.colnames) == 20 assert table[\"distance\"].unit == \"pc\"", "\"n_ISM\", \"E_SN\", \"r_out\", \"r_in\", \"L_SNR\"] assert table[\"E_SN\"].unit == \"erg\" assert_allclose(table[\"E_SN\"], 1e51) assert table[\"r_out\"].unit", "atol=1e-5) assert table[\"GLAT\"].unit == \"deg\" assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5) assert table[\"VGLON\"].unit == \"deg /", "assert len(table) == 10 assert len(table.colnames) == 34 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"],", "== \"yr\" assert_allclose(table[\"Tau0\"], [5.131385e03, 9.294538e06], atol=1e-5) assert table[\"L_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L_PSR\"],", "atol=1e-5) assert table[\"vz\"].unit == \"km/s\" assert_allclose(d[\"vz\"], -46.193814, atol=1e-5) assert table[\"v_abs\"].unit == \"km/s\" assert_allclose(d[\"v_abs\"],", "under a 3-clause BSD style license - see LICENSE.rst from numpy.testing import assert_allclose,", "assert table[\"vx\"].unit == \"km/s\" assert_allclose(d[\"vx\"], -150.727104, atol=1e-5) assert table[\"vy\"].unit == \"km/s\" assert_allclose(d[\"vy\"], 114.648494,", "import assert_allclose, assert_equal import 
astropy.units as u from astropy.table import Table from gammapy.astro.population", "-150.727104, atol=1e-5) assert table[\"vy\"].unit == \"km/s\" assert_allclose(d[\"vy\"], 114.648494, atol=1e-5) assert table[\"vz\"].unit == \"km/s\"", "rtol=1e-5) def test_add_pwn_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) # To compute PWN parameters we", "assert_allclose(d[\"x\"], 0.0976270078546495) assert table[\"y\"].unit == \"pc\" assert_allclose(d[\"y\"], 0.3556330735924602) assert table[\"z\"].unit == \"pc\" assert_allclose(d[\"z\"],", "# To compute PWN parameters we need PSR and SNR parameters first table", "random_state=0) table = add_pwn_parameters(table) d = table[0] assert len(table) == 10 assert len(table.colnames)", "\"km/s\" assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5) def test_add_snr_parameters(): table = Table() table[\"age\"] = [100, 1000]", "len(table.colnames) == 27 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) def test_add_observed_parameters(): table", "= add_pwn_parameters(table) d = table[0] assert len(table) == 10 assert len(table.colnames) == 27", "== \"kpc\" assert_allclose(d[\"z\"], 0.023161, atol=1e-5) assert table[\"vx\"].unit == \"km/s\" assert_allclose(d[\"vx\"], -150.727104, atol=1e-5) assert", "\"kpc\" assert_allclose(d[\"y\"], 3.081642, atol=1e-5) assert table[\"z\"].unit == \"kpc\" assert_allclose(d[\"z\"], 0.023161, atol=1e-5) assert table[\"vx\"].unit", "-46.193814, atol=1e-5) assert table[\"v_abs\"].unit == \"km/s\" assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5) def test_add_snr_parameters(): table =", "assert len(table) == 10 assert len(table.colnames) == 27 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"],", "table = make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"], 0) def test_make_catalog_random_positions_sphere(): table = 
make_catalog_random_positions_sphere(random_state=0)", "assert table[\"y_birth\"].unit == \"kpc\" assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5) assert table[\"z_birth\"].unit == \"kpc\" assert_allclose(d[\"z_birth\"], 0.049088,", "assert_allclose(d[\"z\"], -0.37640823601179485) table = make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"], 0) table = make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"],", "table[\"L0_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33], rtol=1e-5) assert table[\"B_PSR\"].unit == \"G\"", "make_base_catalog_galactic(n_sources=10, random_state=0) d = table[0] assert len(table) == 10 assert len(table.colnames) == 13", "table[0] assert len(table) == 10 assert len(table.colnames) == 13 assert table[\"age\"].unit == \"yr\"", "2 assert table.colnames == [\"age\", \"n_ISM\", \"E_SN\", \"r_out\", \"r_in\", \"L_SNR\"] assert table[\"E_SN\"].unit ==", "1.109026e33], rtol=1e-5) assert table[\"B_PSR\"].unit == \"G\" assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11], rtol=1e-5) def test_add_pwn_parameters(): table", "basic asserts # on the output so that we make sure we notice", "style license - see LICENSE.rst from numpy.testing import assert_allclose, assert_equal import astropy.units as", "3.4482969442579128) assert table[\"lat\"].unit == \"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267) assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606)", "assert len(table.colnames) == 3 assert table[\"lon\"].unit == \"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128) assert table[\"lat\"].unit ==", "\"pc\" assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743]) assert table[\"L_SNR\"].unit == \"1 / s\" assert_allclose(table[\"L_SNR\"], [0, 1.0768e33])", "== 10 assert len(table.colnames) == 27 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, 
atol=1e-4)", "as u from astropy.table import Table from gammapy.astro.population import ( add_observed_parameters, add_pulsar_parameters, add_pwn_parameters,", "== \"pc\" assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5) assert table[\"GLON\"].unit == \"deg\" assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5) assert", "atol=1e-5) assert table[\"P1\"].unit == \"\" assert_allclose(table[\"P1\"], [6.310423e-13, 4.198294e-16], atol=1e-5) assert table[\"P0_birth\"].unit == \"s\"", "\"pc\" assert_allclose(d[\"z\"], -0.37640823601179485) table = make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"], 0) table = make_catalog_random_positions_cube(dimension=1, random_state=0)", "assert len(table.colnames) == 3 assert table[\"x\"].unit == \"pc\" assert_allclose(d[\"x\"], 0.0976270078546495) assert table[\"y\"].unit ==", "table[0] assert len(table) == 100 assert len(table.colnames) == 3 assert table[\"lon\"].unit == \"rad\"", "assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33], rtol=1e-5) assert table[\"L0_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33],", "0.023161, atol=1e-5) assert table[\"vx\"].unit == \"km/s\" assert_allclose(d[\"vx\"], -150.727104, atol=1e-5) assert table[\"vy\"].unit == \"km/s\"", "assert len(table) == 100 assert len(table.colnames) == 3 assert table[\"x\"].unit == \"pc\" assert_allclose(d[\"x\"],", "assert_allclose(d[\"distance\"], 0.6780943487897606) def test_make_base_catalog_galactic(): table = make_base_catalog_galactic(n_sources=10, random_state=0) d = table[0] assert len(table)", "== 20 assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5) assert table[\"GLON\"].unit == \"deg\"", "\"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128) assert table[\"lat\"].unit == \"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267) assert table[\"distance\"].unit == \"pc\"", "[1, 3.80730787743]) assert 
table[\"r_in\"].unit == \"pc\" assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743]) assert table[\"L_SNR\"].unit == \"1", "in a chain and do very basic asserts # on the output so", "== \"Crux Scutum\" assert table[\"x_birth\"].unit == \"kpc\" assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5) assert table[\"y_birth\"].unit ==", "assert table[\"L_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33], rtol=1e-5) assert table[\"L0_PSR\"].unit ==", "assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5) assert table[\"VGLON\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5) assert", "assert_allclose(d[\"y\"], 0.3556330735924602) assert table[\"z\"].unit == \"pc\" assert_allclose(d[\"z\"], -0.37640823601179485) table = make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"],", "we make sure we notice changes. assert len(table) == 10 assert len(table.colnames) ==", "assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"], 0) def test_make_catalog_random_positions_sphere(): table = make_catalog_random_positions_sphere(random_state=0) d = table[0] assert", "= add_pulsar_parameters(table, random_state=0) assert len(table) == 2 assert len(table.colnames) == 10 assert table[\"age\"].unit", "PWN parameters we need PSR and SNR parameters first table = add_snr_parameters(table) table", "assert d[\"spiralarm\"] == \"Crux Scutum\" assert table[\"x_birth\"].unit == \"kpc\" assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5) assert", "test_make_catalog_random_positions_cube(): table = make_catalog_random_positions_cube(random_state=0) d = table[0] assert len(table) == 100 assert len(table.colnames)", "table[\"spiralarm\"].unit is None assert d[\"spiralarm\"] == \"Crux Scutum\" assert table[\"x_birth\"].unit == \"kpc\" assert_allclose(d[\"x_birth\"],", "\"erg / s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33], rtol=1e-5) assert table[\"B_PSR\"].unit == \"G\" 
assert_allclose(table[\"B_PSR\"], [1.194420e13,", "compute PWN parameters we need PSR and SNR parameters first table = add_snr_parameters(table)", "114.648494, atol=1e-5) assert table[\"vz\"].unit == \"km/s\" assert_allclose(d[\"vz\"], -46.193814, atol=1e-5) assert table[\"v_abs\"].unit == \"km/s\"", "assert table[\"P1\"].unit == \"\" assert_allclose(table[\"P1\"], [6.310423e-13, 4.198294e-16], atol=1e-5) assert table[\"P0_birth\"].unit == \"s\" assert_allclose(table[\"P0_birth\"],", "table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) def test_add_observed_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) table", "table[\"L_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33], rtol=1e-5) assert table[\"L0_PSR\"].unit == \"erg", "len(table) == 2 assert len(table.colnames) == 10 assert table[\"age\"].unit == \"yr\" assert_allclose(table[\"age\"], [100,", "== \"\" assert_allclose(table[\"P1\"], [6.310423e-13, 4.198294e-16], atol=1e-5) assert table[\"P0_birth\"].unit == \"s\" assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336],", "len(table) == 100 assert len(table.colnames) == 3 assert table[\"x\"].unit == \"pc\" assert_allclose(d[\"x\"], 0.0976270078546495)", "table[\"P1\"].unit == \"\" assert_allclose(table[\"P1\"], [6.310423e-13, 4.198294e-16], atol=1e-5) assert table[\"P0_birth\"].unit == \"s\" assert_allclose(table[\"P0_birth\"], [0.212418,", "Myr\" assert_allclose(d[\"VGLAT\"], -0.209514, atol=1e-5) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5) assert table[\"DEC\"].unit", "[6.558773e-13, 4.199198e-16], atol=1e-5) assert table[\"CharAge\"].unit == \"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24], atol=1e-5) assert table[\"Tau0\"].unit", "add_observed_parameters(table) d = table[0] # Note: the individual functions are tested above. 
#", "table = make_catalog_random_positions_cube(random_state=0) d = table[0] assert len(table) == 100 assert len(table.colnames) ==", "assert table[\"vz\"].unit == \"km/s\" assert_allclose(d[\"vz\"], -46.193814, atol=1e-5) assert table[\"v_abs\"].unit == \"km/s\" assert_allclose(d[\"v_abs\"], 194.927693,", "= add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) d = table[0] assert len(table) == 10", "# Here we just run them in a chain and do very basic", "assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5) assert table[\"GLON\"].unit == \"deg\" assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5) assert table[\"GLAT\"].unit ==", "make sure we notice changes. assert len(table) == 10 assert len(table.colnames) == 34", "table = make_catalog_random_positions_sphere(random_state=0) d = table[0] assert len(table) == 100 assert len(table.colnames) ==", "random_state=0) table = add_pwn_parameters(table) table = add_observed_parameters(table) d = table[0] # Note: the", "/ s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33], rtol=1e-5) assert table[\"B_PSR\"].unit == \"G\" assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11],", "= make_base_catalog_galactic(n_sources=10, random_state=0) table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table)", "\"pc\" assert_allclose(table[\"r_out\"], [1, 3.80730787743]) assert table[\"r_in\"].unit == \"pc\" assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743]) assert table[\"L_SNR\"].unit", "\"deg / Myr\" assert_allclose(d[\"VGLAT\"], -0.209514, atol=1e-5) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5)", "rtol=1e-5) assert table[\"L0_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33], rtol=1e-5) assert table[\"B_PSR\"].unit", "individual functions are tested above. 
# Here we just run them in a", "\"r_out\", \"r_in\", \"L_SNR\"] assert table[\"E_SN\"].unit == \"erg\" assert_allclose(table[\"E_SN\"], 1e51) assert table[\"r_out\"].unit == \"pc\"", "\"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24], atol=1e-5) assert table[\"Tau0\"].unit == \"yr\" assert_allclose(table[\"Tau0\"], [5.131385e03, 9.294538e06], atol=1e-5)", "Table() table[\"age\"] = [100, 1000] * u.yr table[\"n_ISM\"] = u.Quantity(1, \"cm-3\") table =", "[2.207394e-21, 1.638930e-24], atol=1e-5) assert table[\"Tau0\"].unit == \"yr\" assert_allclose(table[\"Tau0\"], [5.131385e03, 9.294538e06], atol=1e-5) assert table[\"L_PSR\"].unit", "* u.yr table = add_pulsar_parameters(table, random_state=0) assert len(table) == 2 assert len(table.colnames) ==", "== 2 assert len(table.colnames) == 10 assert table[\"age\"].unit == \"yr\" assert_allclose(table[\"age\"], [100, 1000])", "== 10 assert len(table.colnames) == 20 assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5)", "and do very basic asserts # on the output so that we make", "[6.310423e-13, 4.198294e-16], atol=1e-5) assert table[\"P0_birth\"].unit == \"s\" assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336], atol=1e-5) assert table[\"P1_birth\"].unit", "\"km/s\" assert_allclose(d[\"vx\"], -150.727104, atol=1e-5) assert table[\"vy\"].unit == \"km/s\" assert_allclose(d[\"vy\"], 114.648494, atol=1e-5) assert table[\"vz\"].unit", "assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5) assert table[\"GLON\"].unit == \"deg\" assert_allclose(d[\"GLON\"], -27.156565,", "13016.572756, atol=1e-5) assert table[\"GLON\"].unit == \"deg\" assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5) assert table[\"GLAT\"].unit == \"deg\"", "assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5) assert table[\"GLAT\"].unit == \"deg\" assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5) assert table[\"VGLON\"].unit ==", "assert 
len(table) == 10 assert len(table.colnames) == 13 assert table[\"age\"].unit == \"yr\" assert_allclose(d[\"age\"],", "194.927693, atol=1e-5) def test_add_snr_parameters(): table = Table() table[\"age\"] = [100, 1000] * u.yr", "table = add_observed_parameters(table) d = table[0] assert len(table) == 10 assert len(table.colnames) ==", "[2.701524e36, 1.109026e33], rtol=1e-5) assert table[\"B_PSR\"].unit == \"G\" assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11], rtol=1e-5) def test_add_pwn_parameters():", "= table[0] assert len(table) == 10 assert len(table.colnames) == 20 assert table[\"distance\"].unit ==", "== 2 assert table.colnames == [\"age\", \"n_ISM\", \"E_SN\", \"r_out\", \"r_in\", \"L_SNR\"] assert table[\"E_SN\"].unit", "\"\" assert_allclose(table[\"P1\"], [6.310423e-13, 4.198294e-16], atol=1e-5) assert table[\"P0_birth\"].unit == \"s\" assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336], atol=1e-5)", "table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) d = table[0] assert len(table) ==", "test_add_snr_parameters(): table = Table() table[\"age\"] = [100, 1000] * u.yr table[\"n_ISM\"] = u.Quantity(1,", "= make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"], 0) def test_make_catalog_random_positions_sphere(): table = make_catalog_random_positions_sphere(random_state=0) d", "assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5) assert table[\"DEC\"].unit == \"deg\" assert_allclose(d[\"DEC\"], -50.410142,", "assert table[\"age\"].unit == \"yr\" assert_allclose(d[\"age\"], 548813.50392732478) assert table[\"n_ISM\"].unit == \"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0) assert", "== \"yr\" assert_allclose(d[\"age\"], 548813.50392732478) assert table[\"n_ISM\"].unit == \"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0) assert table[\"spiralarm\"].unit is", "10 assert len(table.colnames) == 27 assert 
table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) def", "== \"1 / s\" assert_allclose(table[\"L_SNR\"], [0, 1.0768e33]) def test_add_pulsar_parameters(): table = Table() table[\"age\"]", "[5.131385e03, 9.294538e06], atol=1e-5) assert table[\"L_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33], rtol=1e-5)", "make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"], 0) def test_make_catalog_random_positions_sphere(): table = make_catalog_random_positions_sphere(random_state=0) d =", "\"pc\" assert_allclose(d[\"x\"], 0.0976270078546495) assert table[\"y\"].unit == \"pc\" assert_allclose(d[\"y\"], 0.3556330735924602) assert table[\"z\"].unit == \"pc\"", "[0.212418, 0.246336], atol=1e-5) assert table[\"P1_birth\"].unit == \"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16], atol=1e-5) assert table[\"CharAge\"].unit", "numpy.testing import assert_allclose, assert_equal import astropy.units as u from astropy.table import Table from", "def test_make_catalog_random_positions_cube(): table = make_catalog_random_positions_cube(random_state=0) d = table[0] assert len(table) == 100 assert", "== \"kpc\" assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5) assert table[\"z_birth\"].unit == \"kpc\" assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5) assert", "== \"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0) assert table[\"spiralarm\"].unit is None assert d[\"spiralarm\"] == \"Crux Scutum\"", "table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5)", "[2.599229e36, 1.108788e33], rtol=1e-5) assert table[\"L0_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33], rtol=1e-5)", "\"kpc\" assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5) assert table[\"x\"].unit == \"kpc\" 
assert_allclose(d[\"x\"], -5.941061, atol=1e-5) assert table[\"y\"].unit", "assert table[\"y\"].unit == \"pc\" assert_allclose(d[\"y\"], 0.3556330735924602) assert table[\"z\"].unit == \"pc\" assert_allclose(d[\"z\"], -0.37640823601179485) table", "works table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table", "make_base_catalog_galactic(n_sources=10, random_state=0) # To compute PWN parameters we need PSR and SNR parameters", "on the output so that we make sure we notice changes. assert len(table)", "assert table[\"lon\"].unit == \"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128) assert table[\"lat\"].unit == \"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267) assert", "\"kpc\" assert_allclose(d[\"z\"], 0.023161, atol=1e-5) assert table[\"vx\"].unit == \"km/s\" assert_allclose(d[\"vx\"], -150.727104, atol=1e-5) assert table[\"vy\"].unit", "= make_base_catalog_galactic(n_sources=10, random_state=0) table = add_observed_parameters(table) d = table[0] assert len(table) == 10", "len(table.colnames) == 13 assert table[\"age\"].unit == \"yr\" assert_allclose(d[\"age\"], 548813.50392732478) assert table[\"n_ISM\"].unit == \"cm-3\"", "table[\"y\"].unit == \"kpc\" assert_allclose(d[\"y\"], 3.081642, atol=1e-5) assert table[\"z\"].unit == \"kpc\" assert_allclose(d[\"z\"], 0.023161, atol=1e-5)", "[100, 1000] * u.yr table[\"n_ISM\"] = u.Quantity(1, \"cm-3\") table = add_snr_parameters(table) assert len(table)", "table[\"Tau0\"].unit == \"yr\" assert_allclose(table[\"Tau0\"], [5.131385e03, 9.294538e06], atol=1e-5) assert table[\"L_PSR\"].unit == \"erg / s\"", "u from astropy.table import Table from gammapy.astro.population import ( add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters,", "[\"age\", \"n_ISM\", \"E_SN\", \"r_out\", \"r_in\", \"L_SNR\"] assert table[\"E_SN\"].unit == \"erg\" assert_allclose(table[\"E_SN\"], 1e51) 
assert", "== \"deg\" assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5) assert table[\"VGLON\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLON\"], 0.368166,", "0.36359133530192267) assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606) def test_make_base_catalog_galactic(): table = make_base_catalog_galactic(n_sources=10, random_state=0)", "== 34 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) assert table[\"RA\"].unit == \"deg\"", "assert_allclose(d[\"x\"], -5.941061, atol=1e-5) assert table[\"y\"].unit == \"kpc\" assert_allclose(d[\"y\"], 3.081642, atol=1e-5) assert table[\"z\"].unit ==", "== \"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267) assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606) def test_make_base_catalog_galactic(): table", "assert_allclose(d[\"y\"], 3.081642, atol=1e-5) assert table[\"z\"].unit == \"kpc\" assert_allclose(d[\"z\"], 0.023161, atol=1e-5) assert table[\"vx\"].unit ==", "make_catalog_random_positions_cube, make_catalog_random_positions_sphere, ) def test_make_catalog_random_positions_cube(): table = make_catalog_random_positions_cube(random_state=0) d = table[0] assert len(table)", "- see LICENSE.rst from numpy.testing import assert_allclose, assert_equal import astropy.units as u from", "\"erg / s\" assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33], rtol=1e-5) assert table[\"L0_PSR\"].unit == \"erg / s\"", "3.017292, atol=1e-5) assert table[\"z_birth\"].unit == \"kpc\" assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5) assert table[\"x\"].unit == \"kpc\"", "\"yr\" assert_allclose(table[\"Tau0\"], [5.131385e03, 9.294538e06], atol=1e-5) assert table[\"L_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L_PSR\"], [2.599229e36,", "test_make_base_catalog_galactic(): table = make_base_catalog_galactic(n_sources=10, random_state=0) d = table[0] assert len(table) == 10 assert", "atol=1e-5) 
assert table[\"VGLON\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5) assert table[\"VGLAT\"].unit ==", "\"r_in\", \"L_SNR\"] assert table[\"E_SN\"].unit == \"erg\" assert_allclose(table[\"E_SN\"], 1e51) assert table[\"r_out\"].unit == \"pc\" assert_allclose(table[\"r_out\"],", "0.049088, atol=1e-5) assert table[\"x\"].unit == \"kpc\" assert_allclose(d[\"x\"], -5.941061, atol=1e-5) assert table[\"y\"].unit == \"kpc\"", "table[\"x\"].unit == \"pc\" assert_allclose(d[\"x\"], 0.0976270078546495) assert table[\"y\"].unit == \"pc\" assert_allclose(d[\"y\"], 0.3556330735924602) assert table[\"z\"].unit", "assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24], atol=1e-5) assert table[\"Tau0\"].unit == \"yr\" assert_allclose(table[\"Tau0\"], [5.131385e03, 9.294538e06], atol=1e-5) assert", "[100, 1000] * u.yr table = add_pulsar_parameters(table, random_state=0) assert len(table) == 2 assert", "table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) table = add_observed_parameters(table) d = table[0]", "table[\"r_out\"].unit == \"pc\" assert_allclose(table[\"r_out\"], [1, 3.80730787743]) assert table[\"r_in\"].unit == \"pc\" assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743])", "def test_make_base_catalog_galactic(): table = make_base_catalog_galactic(n_sources=10, random_state=0) d = table[0] assert len(table) == 10", "atol=1e-5) def test_add_snr_parameters(): table = Table() table[\"age\"] = [100, 1000] * u.yr table[\"n_ISM\"]", "table.colnames == [\"age\", \"n_ISM\", \"E_SN\", \"r_out\", \"r_in\", \"L_SNR\"] assert table[\"E_SN\"].unit == \"erg\" assert_allclose(table[\"E_SN\"],", "== \"deg\" assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5) assert table[\"GLAT\"].unit == \"deg\" assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5) assert", "= Table() table[\"age\"] = [100, 1000] * u.yr table[\"n_ISM\"] = u.Quantity(1, \"cm-3\") table", "1000] * u.yr table = add_pulsar_parameters(table, 
random_state=0) assert len(table) == 2 assert len(table.colnames)", "== 100 assert len(table.colnames) == 3 assert table[\"lon\"].unit == \"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128) assert", "assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33], rtol=1e-5) assert table[\"B_PSR\"].unit == \"G\" assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11], rtol=1e-5) def", "\"s\" assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336], atol=1e-5) assert table[\"P1_birth\"].unit == \"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16], atol=1e-5)", "\"km/s\" assert_allclose(d[\"vy\"], 114.648494, atol=1e-5) assert table[\"vz\"].unit == \"km/s\" assert_allclose(d[\"vz\"], -46.193814, atol=1e-5) assert table[\"v_abs\"].unit", "= [100, 1000] * u.yr table[\"n_ISM\"] = u.Quantity(1, \"cm-3\") table = add_snr_parameters(table) assert", "assert len(table.colnames) == 13 assert table[\"age\"].unit == \"yr\" assert_allclose(d[\"age\"], 548813.50392732478) assert table[\"n_ISM\"].unit ==", "def test_add_snr_parameters(): table = Table() table[\"age\"] = [100, 1000] * u.yr table[\"n_ISM\"] =", "table = Table() table[\"age\"] = [100, 1000] * u.yr table[\"n_ISM\"] = u.Quantity(1, \"cm-3\")", "== \"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24], atol=1e-5) assert table[\"Tau0\"].unit == \"yr\" assert_allclose(table[\"Tau0\"], [5.131385e03, 9.294538e06],", "table[\"CharAge\"].unit == \"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24], atol=1e-5) assert table[\"Tau0\"].unit == \"yr\" assert_allclose(table[\"Tau0\"], [5.131385e03,", "table = add_snr_parameters(table) assert len(table) == 2 assert table.colnames == [\"age\", \"n_ISM\", \"E_SN\",", "that we make sure we notice changes. 
assert len(table) == 10 assert len(table.colnames)", "table[\"z_birth\"].unit == \"kpc\" assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5) assert table[\"x\"].unit == \"kpc\" assert_allclose(d[\"x\"], -5.941061, atol=1e-5)", "def test_chain_all(): # Test that running the simulation functions in chain works table", "output so that we make sure we notice changes. assert len(table) == 10", "u.yr table = add_pulsar_parameters(table, random_state=0) assert len(table) == 2 assert len(table.colnames) == 10", "= Table() table[\"age\"] = [100, 1000] * u.yr table = add_pulsar_parameters(table, random_state=0) assert", "== \"erg\" assert_allclose(table[\"E_SN\"], 1e51) assert table[\"r_out\"].unit == \"pc\" assert_allclose(table[\"r_out\"], [1, 3.80730787743]) assert table[\"r_in\"].unit", "0.0976270078546495) assert table[\"y\"].unit == \"pc\" assert_allclose(d[\"y\"], 0.3556330735924602) assert table[\"z\"].unit == \"pc\" assert_allclose(d[\"z\"], -0.37640823601179485)", "Test that running the simulation functions in chain works table = make_base_catalog_galactic(n_sources=10, random_state=0)", "make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere, ) def test_make_catalog_random_positions_cube(): table = make_catalog_random_positions_cube(random_state=0) d = table[0] assert", "0.246336], atol=1e-5) assert table[\"P1_birth\"].unit == \"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16], atol=1e-5) assert table[\"CharAge\"].unit ==", "== \"km/s\" assert_allclose(d[\"vz\"], -46.193814, atol=1e-5) assert table[\"v_abs\"].unit == \"km/s\" assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5) def", "table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5) assert table[\"GLON\"].unit == \"deg\" assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5)", "Myr\" assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5) assert table[\"VGLAT\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLAT\"], 
-0.209514, atol=1e-5)", "in chain works table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_snr_parameters(table) table = add_pulsar_parameters(table,", "== \"kpc\" assert_allclose(d[\"y\"], 3.081642, atol=1e-5) assert table[\"z\"].unit == \"kpc\" assert_allclose(d[\"z\"], 0.023161, atol=1e-5) assert", "1.0768e33]) def test_add_pulsar_parameters(): table = Table() table[\"age\"] = [100, 1000] * u.yr table", "license - see LICENSE.rst from numpy.testing import assert_allclose, assert_equal import astropy.units as u", "len(table) == 10 assert len(table.colnames) == 34 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224,", "first table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) d =", "1000] * u.yr table[\"n_ISM\"] = u.Quantity(1, \"cm-3\") table = add_snr_parameters(table) assert len(table) ==", "functions in chain works table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_snr_parameters(table) table =", "\"L_SNR\"] assert table[\"E_SN\"].unit == \"erg\" assert_allclose(table[\"E_SN\"], 1e51) assert table[\"r_out\"].unit == \"pc\" assert_allclose(table[\"r_out\"], [1,", "assert table[\"P0\"].unit == \"s\" assert_allclose(table[\"P0\"], [0.214478, 0.246349], atol=1e-5) assert table[\"P1\"].unit == \"\" assert_allclose(table[\"P1\"],", "atol=1e-4) def test_add_observed_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_observed_parameters(table) d = table[0]", "\"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) def test_add_observed_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_observed_parameters(table)", "assert table[\"L_SNR\"].unit == \"1 / s\" assert_allclose(table[\"L_SNR\"], [0, 1.0768e33]) def test_add_pulsar_parameters(): table =", "= table[0] # Note: the individual functions are tested above. 
# Here we", "table[\"n_ISM\"].unit == \"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0) assert table[\"spiralarm\"].unit is None assert d[\"spiralarm\"] == \"Crux", "table[\"z\"].unit == \"kpc\" assert_allclose(d[\"z\"], 0.023161, atol=1e-5) assert table[\"vx\"].unit == \"km/s\" assert_allclose(d[\"vx\"], -150.727104, atol=1e-5)", "= make_base_catalog_galactic(n_sources=10, random_state=0) # To compute PWN parameters we need PSR and SNR", "0.6780943487897606) def test_make_base_catalog_galactic(): table = make_base_catalog_galactic(n_sources=10, random_state=0) d = table[0] assert len(table) ==", "assert_equal import astropy.units as u from astropy.table import Table from gammapy.astro.population import (", "assert table[\"B_PSR\"].unit == \"G\" assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11], rtol=1e-5) def test_add_pwn_parameters(): table = make_base_catalog_galactic(n_sources=10,", "100 assert len(table.colnames) == 3 assert table[\"x\"].unit == \"pc\" assert_allclose(d[\"x\"], 0.0976270078546495) assert table[\"y\"].unit", "len(table) == 10 assert len(table.colnames) == 27 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224,", "13 assert table[\"age\"].unit == \"yr\" assert_allclose(d[\"age\"], 548813.50392732478) assert table[\"n_ISM\"].unit == \"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0)", "table[\"L_SNR\"].unit == \"1 / s\" assert_allclose(table[\"L_SNR\"], [0, 1.0768e33]) def test_add_pulsar_parameters(): table = Table()", "\"erg\" assert_allclose(table[\"E_SN\"], 1e51) assert table[\"r_out\"].unit == \"pc\" assert_allclose(table[\"r_out\"], [1, 3.80730787743]) assert table[\"r_in\"].unit ==", "u.yr table[\"n_ISM\"] = u.Quantity(1, \"cm-3\") table = add_snr_parameters(table) assert len(table) == 2 assert", "assert table[\"r_in\"].unit == \"pc\" assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743]) assert table[\"L_SNR\"].unit == \"1 / s\"", "assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5) assert 
table[\"x\"].unit == \"kpc\" assert_allclose(d[\"x\"], -5.941061, atol=1e-5) assert table[\"y\"].unit ==", "d = table[0] assert len(table) == 10 assert len(table.colnames) == 27 assert table[\"r_out_PWN\"].unit", "the individual functions are tested above. # Here we just run them in", "assert_allclose(d[\"n_ISM\"], 1.0) assert table[\"spiralarm\"].unit is None assert d[\"spiralarm\"] == \"Crux Scutum\" assert table[\"x_birth\"].unit", "atol=1e-5) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5) assert table[\"DEC\"].unit == \"deg\" assert_allclose(d[\"DEC\"],", "== [\"age\", \"n_ISM\", \"E_SN\", \"r_out\", \"r_in\", \"L_SNR\"] assert table[\"E_SN\"].unit == \"erg\" assert_allclose(table[\"E_SN\"], 1e51)", "table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_observed_parameters(table) d = table[0] assert len(table) ==", "== \"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128) assert table[\"lat\"].unit == \"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267) assert table[\"distance\"].unit ==", "table[\"age\"] = [100, 1000] * u.yr table = add_pulsar_parameters(table, random_state=0) assert len(table) ==", "atol=1e-5) assert table[\"L_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33], rtol=1e-5) assert table[\"L0_PSR\"].unit", "assert_allclose(table[\"Tau0\"], [5.131385e03, 9.294538e06], atol=1e-5) assert table[\"L_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33],", "assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11], rtol=1e-5) def test_add_pwn_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) # To compute", "\"1 / s\" assert_allclose(table[\"L_SNR\"], [0, 1.0768e33]) def test_add_pulsar_parameters(): table = Table() table[\"age\"] =", "def test_add_pwn_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) # To compute PWN parameters we need", "and SNR 
parameters first table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table =", "atol=1e-5) assert table[\"vy\"].unit == \"km/s\" assert_allclose(d[\"vy\"], 114.648494, atol=1e-5) assert table[\"vz\"].unit == \"km/s\" assert_allclose(d[\"vz\"],", "add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) d = table[0] assert len(table) == 10 assert", "# Note: the individual functions are tested above. # Here we just run", "add_snr_parameters(table) assert len(table) == 2 assert table.colnames == [\"age\", \"n_ISM\", \"E_SN\", \"r_out\", \"r_in\",", "assert len(table.colnames) == 27 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) def test_add_observed_parameters():", "= table[0] assert len(table) == 10 assert len(table.colnames) == 27 assert table[\"r_out_PWN\"].unit ==", "random_state=0) d = table[0] assert len(table) == 10 assert len(table.colnames) == 13 assert", "assert_allclose(d[\"z\"], 0.023161, atol=1e-5) assert table[\"vx\"].unit == \"km/s\" assert_allclose(d[\"vx\"], -150.727104, atol=1e-5) assert table[\"vy\"].unit ==", "assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336], atol=1e-5) assert table[\"P1_birth\"].unit == \"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16], atol=1e-5) assert", "10 assert len(table.colnames) == 34 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) assert", "0) table = make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"], 0) def test_make_catalog_random_positions_sphere(): table =", "assert_allclose(table[\"P0\"], [0.214478, 0.246349], atol=1e-5) assert table[\"P1\"].unit == \"\" assert_allclose(table[\"P1\"], [6.310423e-13, 4.198294e-16], atol=1e-5) assert", "assert len(table.colnames) == 34 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) 
assert table[\"RA\"].unit", "table[0] # Note: the individual functions are tested above. # Here we just", "\"yr\" assert_allclose(d[\"age\"], 548813.50392732478) assert table[\"n_ISM\"].unit == \"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0) assert table[\"spiralarm\"].unit is None", "table[\"z\"].unit == \"pc\" assert_allclose(d[\"z\"], -0.37640823601179485) table = make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"], 0) table =", "\"s\" assert_allclose(table[\"P0\"], [0.214478, 0.246349], atol=1e-5) assert table[\"P1\"].unit == \"\" assert_allclose(table[\"P1\"], [6.310423e-13, 4.198294e-16], atol=1e-5)", "1.378224, atol=1e-4) def test_add_observed_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_observed_parameters(table) d =", "tested above. # Here we just run them in a chain and do", "LICENSE.rst from numpy.testing import assert_allclose, assert_equal import astropy.units as u from astropy.table import", "== \"km/s\" assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5) def test_add_snr_parameters(): table = Table() table[\"age\"] = [100,", "\"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5) assert table[\"DEC\"].unit == \"deg\" assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5) def test_chain_all():", "make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"], 0) table = make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"], 0) def", "table[\"E_SN\"].unit == \"erg\" assert_allclose(table[\"E_SN\"], 1e51) assert table[\"r_out\"].unit == \"pc\" assert_allclose(table[\"r_out\"], [1, 3.80730787743]) assert", "== \"pc\" assert_allclose(d[\"x\"], 0.0976270078546495) assert table[\"y\"].unit == \"pc\" assert_allclose(d[\"y\"], 0.3556330735924602) assert table[\"z\"].unit ==", "table[\"lat\"].unit == \"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267) assert table[\"distance\"].unit == 
\"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606) def test_make_base_catalog_galactic():", "atol=1e-5) assert table[\"z\"].unit == \"kpc\" assert_allclose(d[\"z\"], 0.023161, atol=1e-5) assert table[\"vx\"].unit == \"km/s\" assert_allclose(d[\"vx\"],", "len(table.colnames) == 3 assert table[\"x\"].unit == \"pc\" assert_allclose(d[\"x\"], 0.0976270078546495) assert table[\"y\"].unit == \"pc\"", "assert_equal(table[\"z\"], 0) table = make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"], 0) def test_make_catalog_random_positions_sphere(): table", "Table() table[\"age\"] = [100, 1000] * u.yr table = add_pulsar_parameters(table, random_state=0) assert len(table)", "\"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606) def test_make_base_catalog_galactic(): table = make_base_catalog_galactic(n_sources=10, random_state=0) d = table[0] assert", "table[\"B_PSR\"].unit == \"G\" assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11], rtol=1e-5) def test_add_pwn_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0)", "table = make_base_catalog_galactic(n_sources=10, random_state=0) d = table[0] assert len(table) == 10 assert len(table.colnames)", "assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5) def test_chain_all(): # Test that running the simulation functions in", "-0.209514, atol=1e-5) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149, atol=1e-5) assert table[\"DEC\"].unit == \"deg\"", "== \"s\" assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336], atol=1e-5) assert table[\"P1_birth\"].unit == \"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16],", "table[\"x_birth\"].unit == \"kpc\" assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5) assert table[\"y_birth\"].unit == \"kpc\" assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5)", "== \"deg / Myr\" assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5) assert 
table[\"VGLAT\"].unit == \"deg / Myr\"", "/ s\" assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33], rtol=1e-5) assert table[\"L0_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L0_PSR\"],", "assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743]) assert table[\"L_SNR\"].unit == \"1 / s\" assert_allclose(table[\"L_SNR\"], [0, 1.0768e33]) def", "3.254597e11], rtol=1e-5) def test_add_pwn_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) # To compute PWN parameters", "table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table =", "test_chain_all(): # Test that running the simulation functions in chain works table =", "0) assert_equal(table[\"z\"], 0) def test_make_catalog_random_positions_sphere(): table = make_catalog_random_positions_sphere(random_state=0) d = table[0] assert len(table)", "Licensed under a 3-clause BSD style license - see LICENSE.rst from numpy.testing import", "\"kpc\" assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5) assert table[\"z_birth\"].unit == \"kpc\" assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5) assert table[\"x\"].unit", "add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) table = add_observed_parameters(table) d =", "a 3-clause BSD style license - see LICENSE.rst from numpy.testing import assert_allclose, assert_equal", "= add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) d = table[0] assert", "== \"km/s\" assert_allclose(d[\"vy\"], 114.648494, atol=1e-5) assert table[\"vz\"].unit == \"km/s\" assert_allclose(d[\"vz\"], -46.193814, atol=1e-5) assert", "4.198294e-16], atol=1e-5) assert table[\"P0_birth\"].unit == \"s\" assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336], atol=1e-5) assert table[\"P1_birth\"].unit ==", "0) def 
test_make_catalog_random_positions_sphere(): table = make_catalog_random_positions_sphere(random_state=0) d = table[0] assert len(table) == 100", "1.108788e33], rtol=1e-5) assert table[\"L0_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33], rtol=1e-5) assert", "very basic asserts # on the output so that we make sure we", "atol=1e-5) assert table[\"y_birth\"].unit == \"kpc\" assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5) assert table[\"z_birth\"].unit == \"kpc\" assert_allclose(d[\"z_birth\"],", "== \"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606) def test_make_base_catalog_galactic(): table = make_base_catalog_galactic(n_sources=10, random_state=0) d = table[0]", "we need PSR and SNR parameters first table = add_snr_parameters(table) table = add_pulsar_parameters(table,", "assert table[\"age\"].unit == \"yr\" assert_allclose(table[\"age\"], [100, 1000]) assert table[\"P0\"].unit == \"s\" assert_allclose(table[\"P0\"], [0.214478,", "BSD style license - see LICENSE.rst from numpy.testing import assert_allclose, assert_equal import astropy.units", "== \"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16], atol=1e-5) assert table[\"CharAge\"].unit == \"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24],", "random_state=0) table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) table =", "Scutum\" assert table[\"x_birth\"].unit == \"kpc\" assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5) assert table[\"y_birth\"].unit == \"kpc\" assert_allclose(d[\"y_birth\"],", "== 100 assert len(table.colnames) == 3 assert table[\"x\"].unit == \"pc\" assert_allclose(d[\"x\"], 0.0976270078546495) assert", "atol=1e-5) def test_chain_all(): # Test that running the simulation functions in chain works", "assert table[\"z\"].unit == \"pc\" assert_allclose(d[\"z\"], -0.37640823601179485) table = 
make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"], 0) table", "\"deg\" assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5) assert table[\"GLAT\"].unit == \"deg\" assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5) assert table[\"VGLON\"].unit", "s\" assert_allclose(table[\"L_PSR\"], [2.599229e36, 1.108788e33], rtol=1e-5) assert table[\"L0_PSR\"].unit == \"erg / s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36,", "3-clause BSD style license - see LICENSE.rst from numpy.testing import assert_allclose, assert_equal import", "table[\"GLAT\"].unit == \"deg\" assert_allclose(d[\"GLAT\"], 0.101948, atol=1e-5) assert table[\"VGLON\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLON\"],", "# Test that running the simulation functions in chain works table = make_base_catalog_galactic(n_sources=10,", "we just run them in a chain and do very basic asserts #", "assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5) assert table[\"z_birth\"].unit == \"kpc\" assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5) assert table[\"x\"].unit ==", "table[\"P0_birth\"].unit == \"s\" assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336], atol=1e-5) assert table[\"P1_birth\"].unit == \"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13,", "34 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"],", "assert_allclose(d[\"vx\"], -150.727104, atol=1e-5) assert table[\"vy\"].unit == \"km/s\" assert_allclose(d[\"vy\"], 114.648494, atol=1e-5) assert table[\"vz\"].unit ==", "len(table) == 10 assert len(table.colnames) == 20 assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 13016.572756,", "random_state=0) assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"], 0) def test_make_catalog_random_positions_sphere(): table = make_catalog_random_positions_sphere(random_state=0) d = table[0]", "assert table[\"GLON\"].unit == 
\"deg\" assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5) assert table[\"GLAT\"].unit == \"deg\" assert_allclose(d[\"GLAT\"], 0.101948,", "0.101948, atol=1e-5) assert table[\"VGLON\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5) assert table[\"VGLAT\"].unit", "3.80730787743]) assert table[\"r_in\"].unit == \"pc\" assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743]) assert table[\"L_SNR\"].unit == \"1 /", "sure we notice changes. assert len(table) == 10 assert len(table.colnames) == 34 assert", "atol=1e-5) assert table[\"VGLAT\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLAT\"], -0.209514, atol=1e-5) assert table[\"RA\"].unit ==", "table[\"DEC\"].unit == \"deg\" assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5) def test_chain_all(): # Test that running the", "= table[0] assert len(table) == 100 assert len(table.colnames) == 3 assert table[\"lon\"].unit ==", "== \"deg / Myr\" assert_allclose(d[\"VGLAT\"], -0.209514, atol=1e-5) assert table[\"RA\"].unit == \"deg\" assert_allclose(d[\"RA\"], 244.347149,", "atol=1e-5) assert table[\"v_abs\"].unit == \"km/s\" assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5) def test_add_snr_parameters(): table = Table()", "table[\"lon\"].unit == \"rad\" assert_allclose(d[\"lon\"], 3.4482969442579128) assert table[\"lat\"].unit == \"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267) assert table[\"distance\"].unit", "0.246349], atol=1e-5) assert table[\"P1\"].unit == \"\" assert_allclose(table[\"P1\"], [6.310423e-13, 4.198294e-16], atol=1e-5) assert table[\"P0_birth\"].unit ==", "== \"pc\" assert_allclose(d[\"y\"], 0.3556330735924602) assert table[\"z\"].unit == \"pc\" assert_allclose(d[\"z\"], -0.37640823601179485) table = make_catalog_random_positions_cube(dimension=2,", "assert table[\"VGLON\"].unit == \"deg / Myr\" assert_allclose(d[\"VGLON\"], 0.368166, atol=1e-5) assert table[\"VGLAT\"].unit == \"deg", "== \"kpc\" assert_allclose(d[\"x\"], -5.941061, atol=1e-5) assert table[\"y\"].unit 
== \"kpc\" assert_allclose(d[\"y\"], 3.081642, atol=1e-5) assert", "len(table.colnames) == 20 assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5) assert table[\"GLON\"].unit ==", "see LICENSE.rst from numpy.testing import assert_allclose, assert_equal import astropy.units as u from astropy.table", "table[\"x\"].unit == \"kpc\" assert_allclose(d[\"x\"], -5.941061, atol=1e-5) assert table[\"y\"].unit == \"kpc\" assert_allclose(d[\"y\"], 3.081642, atol=1e-5)", "are tested above. # Here we just run them in a chain and", "assert table[\"n_ISM\"].unit == \"cm-3\" assert_allclose(d[\"n_ISM\"], 1.0) assert table[\"spiralarm\"].unit is None assert d[\"spiralarm\"] ==", "== \"km/s\" assert_allclose(d[\"vx\"], -150.727104, atol=1e-5) assert table[\"vy\"].unit == \"km/s\" assert_allclose(d[\"vy\"], 114.648494, atol=1e-5) assert", "== \"kpc\" assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5) assert table[\"x\"].unit == \"kpc\" assert_allclose(d[\"x\"], -5.941061, atol=1e-5) assert", "is None assert d[\"spiralarm\"] == \"Crux Scutum\" assert table[\"x_birth\"].unit == \"kpc\" assert_allclose(d[\"x_birth\"], -5.856461,", "table[\"age\"].unit == \"yr\" assert_allclose(table[\"age\"], [100, 1000]) assert table[\"P0\"].unit == \"s\" assert_allclose(table[\"P0\"], [0.214478, 0.246349],", "d[\"spiralarm\"] == \"Crux Scutum\" assert table[\"x_birth\"].unit == \"kpc\" assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5) assert table[\"y_birth\"].unit", "u.Quantity(1, \"cm-3\") table = add_snr_parameters(table) assert len(table) == 2 assert table.colnames == [\"age\",", "== 10 assert len(table.colnames) == 34 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4)", "== \"pc\" assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743]) assert table[\"L_SNR\"].unit == \"1 / s\" assert_allclose(table[\"L_SNR\"], [0,", "table[0] assert len(table) == 100 assert len(table.colnames) == 3 assert 
table[\"x\"].unit == \"pc\"", "table[\"n_ISM\"] = u.Quantity(1, \"cm-3\") table = add_snr_parameters(table) assert len(table) == 2 assert table.colnames", "assert table[\"P0_birth\"].unit == \"s\" assert_allclose(table[\"P0_birth\"], [0.212418, 0.246336], atol=1e-5) assert table[\"P1_birth\"].unit == \"\" assert_allclose(table[\"P1_birth\"],", "[0, 1.0768e33]) def test_add_pulsar_parameters(): table = Table() table[\"age\"] = [100, 1000] * u.yr", "assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5) def test_add_snr_parameters(): table = Table() table[\"age\"] = [100, 1000] *", "assert table[\"P1_birth\"].unit == \"\" assert_allclose(table[\"P1_birth\"], [6.558773e-13, 4.199198e-16], atol=1e-5) assert table[\"CharAge\"].unit == \"yr\" assert_allclose(table[\"CharAge\"],", "table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606) def test_make_base_catalog_galactic(): table = make_base_catalog_galactic(n_sources=10, random_state=0) d =", "atol=1e-5) assert table[\"vx\"].unit == \"km/s\" assert_allclose(d[\"vx\"], -150.727104, atol=1e-5) assert table[\"vy\"].unit == \"km/s\" assert_allclose(d[\"vy\"],", "[100, 1000]) assert table[\"P0\"].unit == \"s\" assert_allclose(table[\"P0\"], [0.214478, 0.246349], atol=1e-5) assert table[\"P1\"].unit ==", "assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) def test_add_observed_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_observed_parameters(table) d", "\"cm-3\") table = add_snr_parameters(table) assert len(table) == 2 assert table.colnames == [\"age\", \"n_ISM\",", "\"km/s\" assert_allclose(d[\"vz\"], -46.193814, atol=1e-5) assert table[\"v_abs\"].unit == \"km/s\" assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5) def test_add_snr_parameters():", "# on the output so that we make sure we notice changes. 
assert", "that running the simulation functions in chain works table = make_base_catalog_galactic(n_sources=10, random_state=0) table", "assert_allclose(d[\"vy\"], 114.648494, atol=1e-5) assert table[\"vz\"].unit == \"km/s\" assert_allclose(d[\"vz\"], -46.193814, atol=1e-5) assert table[\"v_abs\"].unit ==", "== \"pc\" assert_allclose(table[\"r_out\"], [1, 3.80730787743]) assert table[\"r_in\"].unit == \"pc\" assert_allclose(table[\"r_in\"], [0.9086, 3.45931993743]) assert", "add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere, ) def test_make_catalog_random_positions_cube(): table = make_catalog_random_positions_cube(random_state=0) d = table[0]", "== \"erg / s\" assert_allclose(table[\"L0_PSR\"], [2.701524e36, 1.109026e33], rtol=1e-5) assert table[\"B_PSR\"].unit == \"G\" assert_allclose(table[\"B_PSR\"],", "len(table.colnames) == 34 assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) assert table[\"RA\"].unit ==", "atol=1e-5) assert table[\"GLON\"].unit == \"deg\" assert_allclose(d[\"GLON\"], -27.156565, atol=1e-5) assert table[\"GLAT\"].unit == \"deg\" assert_allclose(d[\"GLAT\"],", "above. # Here we just run them in a chain and do very", "== \"G\" assert_allclose(table[\"B_PSR\"], [1.194420e13, 3.254597e11], rtol=1e-5) def test_add_pwn_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) #", "= add_observed_parameters(table) d = table[0] # Note: the individual functions are tested above.", "3 assert table[\"x\"].unit == \"pc\" assert_allclose(d[\"x\"], 0.0976270078546495) assert table[\"y\"].unit == \"pc\" assert_allclose(d[\"y\"], 0.3556330735924602)", "assert table[\"spiralarm\"].unit is None assert d[\"spiralarm\"] == \"Crux Scutum\" assert table[\"x_birth\"].unit == \"kpc\"", "d = table[0] # Note: the individual functions are tested above. # Here", "Note: the individual functions are tested above. 
# Here we just run them", "table = add_snr_parameters(table) table = add_pulsar_parameters(table, random_state=0) table = add_pwn_parameters(table) table = add_observed_parameters(table)", "random_state=0) assert len(table) == 2 assert len(table.colnames) == 10 assert table[\"age\"].unit == \"yr\"", "table = make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"], 0) table = make_catalog_random_positions_cube(dimension=1, random_state=0) assert_equal(table[\"y\"], 0) assert_equal(table[\"z\"],", "\"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267) assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606) def test_make_base_catalog_galactic(): table =", "assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 0.6780943487897606) def test_make_base_catalog_galactic(): table = make_base_catalog_galactic(n_sources=10, random_state=0) d", "import Table from gammapy.astro.population import ( add_observed_parameters, add_pulsar_parameters, add_pwn_parameters, add_snr_parameters, make_base_catalog_galactic, make_catalog_random_positions_cube, make_catalog_random_positions_sphere,", "assert table[\"CharAge\"].unit == \"yr\" assert_allclose(table[\"CharAge\"], [2.207394e-21, 1.638930e-24], atol=1e-5) assert table[\"Tau0\"].unit == \"yr\" assert_allclose(table[\"Tau0\"],", "asserts # on the output so that we make sure we notice changes.", "== \"pc\" assert_allclose(d[\"z\"], -0.37640823601179485) table = make_catalog_random_positions_cube(dimension=2, random_state=0) assert_equal(table[\"z\"], 0) table = make_catalog_random_positions_cube(dimension=1,", "table[\"v_abs\"].unit == \"km/s\" assert_allclose(d[\"v_abs\"], 194.927693, atol=1e-5) def test_add_snr_parameters(): table = Table() table[\"age\"] =", "make_catalog_random_positions_sphere, ) def test_make_catalog_random_positions_cube(): table = make_catalog_random_positions_cube(random_state=0) d = table[0] assert len(table) 
==", "10 assert table[\"age\"].unit == \"yr\" assert_allclose(table[\"age\"], [100, 1000]) assert table[\"P0\"].unit == \"s\" assert_allclose(table[\"P0\"],", "assert_allclose(d[\"lon\"], 3.4482969442579128) assert table[\"lat\"].unit == \"rad\" assert_allclose(d[\"lat\"], 0.36359133530192267) assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"],", "-50.410142, atol=1e-5) def test_chain_all(): # Test that running the simulation functions in chain", "functions are tested above. # Here we just run them in a chain", "assert len(table) == 100 assert len(table.colnames) == 3 assert table[\"lon\"].unit == \"rad\" assert_allclose(d[\"lon\"],", "assert len(table) == 2 assert table.colnames == [\"age\", \"n_ISM\", \"E_SN\", \"r_out\", \"r_in\", \"L_SNR\"]", "10 assert len(table.colnames) == 20 assert table[\"distance\"].unit == \"pc\" assert_allclose(d[\"distance\"], 13016.572756, atol=1e-5) assert", "assert table[\"r_out_PWN\"].unit == \"pc\" assert_allclose(d[\"r_out_PWN\"], 1.378224, atol=1e-4) def test_add_observed_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0)", "table[\"y_birth\"].unit == \"kpc\" assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5) assert table[\"z_birth\"].unit == \"kpc\" assert_allclose(d[\"z_birth\"], 0.049088, atol=1e-5)", "table[0] assert len(table) == 10 assert len(table.colnames) == 27 assert table[\"r_out_PWN\"].unit == \"pc\"", "atol=1e-5) assert table[\"DEC\"].unit == \"deg\" assert_allclose(d[\"DEC\"], -50.410142, atol=1e-5) def test_chain_all(): # Test that", "/ s\" assert_allclose(table[\"L_SNR\"], [0, 1.0768e33]) def test_add_pulsar_parameters(): table = Table() table[\"age\"] = [100,", "test_add_observed_parameters(): table = make_base_catalog_galactic(n_sources=10, random_state=0) table = add_observed_parameters(table) d = table[0] assert len(table)", "\"kpc\" assert_allclose(d[\"x_birth\"], -5.856461, atol=1e-5) assert table[\"y_birth\"].unit == \"kpc\" 
assert_allclose(d[\"y_birth\"], 3.017292, atol=1e-5) assert table[\"z_birth\"].unit" ]
[ "k2 + S1 - k1 k0 = k1 + S2 for a1 in", "mu' per line, e.g.: # 204 63 22 18 # 204 28 2", "on gamma0 continue elif not is_integer_num(f): # checks integrality of eigenvalue multiplicities continue", "= k2 + S1 - k1 k0 = k1 + S2 for a1", "for entry in range(0,4): # converting text to integer g1[entry] = int( line1[entry]", "going if (rho2 == r2 and sig2 == s2) or (rho2 == s2", "== s2 and sig2 == r2): # print out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1,", "= a1 + lam2 if a2 >= 0 and lam0 < k0: #", "c**2 + 4*(k1 - mu1) d = math.sqrt(delta) r1 = ( c +", "eigenvalue stage\") else: continue # print(S1, \"failed at eigenvalue stage\") else: continue #print(\"P1", "- mu2 delta = c**2 + 4*(k2 - mu2) d = math.sqrt(delta) r2", "the SRD. # Usage: python3.7 SRD-onef.py <infile> <outfile> # The infile should be", "a1 in DGH but need to allow b1 < S1. b2 = b1", "['']: lines.append(graphs.readline().split('\\t') ) count = len(lines)-1 # this is because last item in", "is because last item in list is now '' for i in range(0,count):", "a header row. params.write(tabulate(results, headers = [\"n_i\", \"k_i\", \"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\", \"n\", \"k\",", "sig1 == s1) or (rho1 == s1 and sig1 == r1): # then", "S1. 
b2 = b1 + mu2 - mu1 mu0 = b1 + mu2", "srgs n1 = graph1[0] k1 = graph1[1] lam1 = graph1[2] mu1 = graph1[3]", "strongly regular designs admitting # strongly regular decomposition, in which two graphs from", "n2 = graph2[0] k2 = graph2[1] lam2 = graph2[2] mu2 = graph2[3] #", "srgs g1 = [0,0,0,0] g2 = [0,0,0,0] lines = [] # first, extract", "mu0) d = math.sqrt(delta) r = ( c + d )/2 s =", "with one SRG parameter set in the form 'n k lambda mu' per", "in which two graphs from the input file are # the SRGs on", "= int( line2[entry] ) newones = check_4_srd(g1,g2) # new sets are then appended", "new sets are then appended to the results table results += newones #", "\",\" \",S2,a2, b2, N2, P2]) else: continue # print(S1, \"failed at eigenvalue stage\")", "- 2*r*s*s - s*s - k0*s + k0*r*r + 2*k0*r < 0: #", ")/(n2-S2) if is_integer_num(P1) and is_integer_num(P2): rho1 = N1-P1 rho2 = N2-P2 sig1 =", "out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\" \",\" \",\" \",\" \",S2,a2,", "in range(0,4): # converting text to integer g1[entry] = int( line1[entry] ) g2[entry]", "isinstance(n, float): return n.is_integer() return False def check_4_srd(graph1, graph2): # checks a given", "# Loops through the input file to compare all pairs, including g2=g1. #", "d )/2 n2 = graph2[0] k2 = graph2[1] lam2 = graph2[2] mu2 =", "\"k_i\", \"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\", \"n\", \"k\", \"lam\", \"mu\", \"r\", \"s\", \"S_i\", \"a_i\", \"b_i\",", "mu2 = graph2[3] # compute r2, s2 c = lam2 - mu2 delta", "results with a header row. 
params.write(tabulate(results, headers = [\"n_i\", \"k_i\", \"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\",", "= ( c - d )/2 n2 = graph2[0] k2 = graph2[1] lam2", "k1 k0 = k1 + S2 for a1 in range(0, S1): a2 =", "- mu2) d = math.sqrt(delta) r2 = ( c + d )/2 s2", "0 lines.append(graphs.readline().split('\\t')) while lines[ len(lines)-1 ] != ['']: lines.append(graphs.readline().split('\\t') ) count = len(lines)-1", "2 n = n1 + n2 while S1 < n1: S2 = k2", "b2 == a2 or b2 < 0 or n-2*k0+mu0-2 < 0 or n-2*k0+lam0", "!=S2: # then good P1 = ( (k1-N1)*S1 )/(n1-S1) # check it's an", "g2[entry] = int( line2[entry] ) newones = check_4_srd(g1,g2) # new sets are then", "# Usage: python3.7 SRD-onef.py <infile> <outfile> # The infile should be a tab-separated", "2 + k1 - k2 else: S1 = 2 n = n1 +", "and n2 !=S2: # then good P1 = ( (k1-N1)*S1 )/(n1-S1) # check", "# compute r1, s1 c = lam1 - mu1 delta = c**2 +", "# The infile should be a tab-separated text file with one SRG parameter", "lam2 = graph2[2] mu2 = graph2[3] # compute r2, s2 c = lam2", ")/2 s1 = ( c - d )/2 n2 = graph2[0] k2 =", "S1 = 2 + k1 - k2 else: S1 = 2 n =", "lam1 = graph1[2] mu1 = graph1[3] # compute r1, s1 c = lam1", "will be table of feasible parameters for srds arising from a pair of", "lam1 lam0 = a1 + lam2 if a2 >= 0 and lam0 <", "= [] if k2 < k1: S1 = 2 + k1 - k2", "# 204 28 2 4 # 205 96 50 40 # 205 68", "last item in list is now '' for i in range(0,count): for j", "int): return True if isinstance(n, float): return n.is_integer() return False def check_4_srd(graph1, graph2):", "c = lam0 - mu0 delta = c**2 + 4*(k0 - mu0) d", "+ k1 - k2 else: S1 = 2 n = n1 + n2", "k0*s*s + 2*k0*s < 0 or r*r*s - 2*r*s*s - s*s - k0*s", "in range(i, count): line1 = lines[i] # reads the text, separated by spaces,", "# start of main graphs = open(infile) params = open(outfile, 'w') results =", "including g2=g1. # Uses tabulate to write results with a header row. 
params.write(tabulate(results,", "of function check_4_srd #___________________________________ # start of main graphs = open(infile) params =", "< 0: # above checks Krein parameters on gamma0 continue elif not is_integer_num(f):", "r1 and sig1 == s1) or (rho1 == s1 and sig1 == r1):", "range(0, S1): a2 = a1 + lam2 - lam1 lam0 = a1 +", "\",S2,a2, b2, N2, P2]) else: continue # print(S1, \"failed at eigenvalue stage\") else:", "spaces, into list line2 = lines[j] for entry in range(0,4): # converting text", "parameter sets for strongly regular designs admitting # strongly regular decomposition, in which", "a2 >= 0 and lam0 < k0: # then carry on if a1", "= [0,0,0,0] lines = [] # first, extract a pair of lines from", "# b1 < a1 in DGH but need to allow b1 < S1.", "= graph1[2] mu1 = graph1[3] # compute r1, s1 c = lam1 -", "+= 1 return(table) # end of function check_4_srd #___________________________________ # start of main", "graphs = open(infile) params = open(outfile, 'w') results = [] # will be", "= [] # will be table of feasible parameters for srds arising from", "( c - d )/2 table = [] if k2 < k1: S1", "if (rho2 == r2 and sig2 == s2) or (rho2 == s2 and", "strongly regular decomposition, in which two graphs from the input file are #", "s1 and sig1 == r1): # then all is good, keep going if", "compute r1, s1 c = lam1 - mu1 delta = c**2 + 4*(k1", "r1, s1 c = lam1 - mu1 delta = c**2 + 4*(k1 -", "(n-k0-1)*mu0 != k0*(k0-lam0-1): continue elif r*s*s - 2*r*r*s - r*r - k0*r +", "= a2*k1/S2 N2 = a1*k2/S1 if is_integer_num(N1) and is_integer_num(N2) and n1 != S1", "= a1*k2/S1 if is_integer_num(N1) and is_integer_num(N2) and n1 != S1 and n2 !=S2:", "(rho1 == s1 and sig1 == r1): # then all is good, keep", "< 0 or n-2*k0+lam0 < 0: continue #any of those would be invalid", "# converting text to integer g1[entry] = int( line1[entry] ) g2[entry] = int(", "Output is feasible parameter sets for strongly regular designs admitting # strongly regular", "= graph1[0] k1 = graph1[1] lam1 = 
graph1[2] mu1 = graph1[3] # compute", "- k2 else: S1 = 2 n = n1 + n2 while S1", "two fibres of the SRD. # Usage: python3.7 SRD-onef.py <infile> <outfile> # The", "g2 = [0,0,0,0] lines = [] # first, extract a pair of lines", "graph2[1] lam2 = graph2[2] mu2 = graph2[3] # compute r2, s2 c =", "= -(S2-b2)/(a2-b2) sig2 = -(S1-b1)/(a1-b1) if (rho1 == r1 and sig1 == s1)", "top = S1 else: bot = 0 top = a1 for b1 in", "graph2): # checks a given pair of srgs n1 = graph1[0] k1 =", "mu2 c = lam0 - mu0 delta = c**2 + 4*(k0 - mu0)", "s2 and sig2 == r2): # print out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1])", "+ mu2 - mu1 mu0 = b1 + mu2 c = lam0 -", "= 2 + k1 - k2 else: S1 = 2 n = n1", "= ( (k1-N1)*S1 )/(n1-S1) # check it's an integer P2 = ( (k2-N2)*S2", "c**2 + 4*(k0 - mu0) d = math.sqrt(delta) r = ( c +", "< k1: S1 = 2 + k1 - k2 else: S1 = 2", "from the text file of srgs i = 1 j = 1 count", "S2 = k2 + S1 - k1 k0 = k1 + S2 for", "\"r\", \"s\", \"S_i\", \"a_i\", \"b_i\", \"N_i\", \"P_i\"])) params.close() graphs.close() # print( time.perf_counter() -", "an integer P2 = ( (k2-N2)*S2 )/(n2-S2) if is_integer_num(P1) and is_integer_num(P2): rho1 =", "check_4_srd(graph1, graph2): # checks a given pair of srgs n1 = graph1[0] k1", "while S1 < n1: S2 = k2 + S1 - k1 k0 =", "- mu0 delta = c**2 + 4*(k0 - mu0) d = math.sqrt(delta) r", "continue #print(\"P1 or P2 not integer\") else: continue #print(\"N1 or N2 not integer\")", "at eigenvalue stage\") else: continue # print(S1, \"failed at eigenvalue stage\") else: continue", "S1 = 2 n = n1 + n2 while S1 < n1: S2", "int( line1[entry] ) g2[entry] = int( line2[entry] ) newones = check_4_srd(g1,g2) # new", "= graph2[0] k2 = graph2[1] lam2 = graph2[2] mu2 = graph2[3] # compute", "= 0 lines.append(graphs.readline().split('\\t')) while lines[ len(lines)-1 ] != ['']: lines.append(graphs.readline().split('\\t') ) count =", "on N1 = a2*k1/S2 N2 = a1*k2/S1 if is_integer_num(N1) and is_integer_num(N2) and 
n1", "and is_integer_num(N2) and n1 != S1 and n2 !=S2: # then good P1", "open(outfile, 'w') results = [] # will be table of feasible parameters for", "newones # Loops through the input file to compare all pairs, including g2=g1.", "= math.sqrt(delta) r1 = ( c + d )/2 s1 = ( c", "#print(\"P1 or P2 not integer\") else: continue #print(\"N1 or N2 not integer\") else:", "!= S1 and n2 !=S2: # then good P1 = ( (k1-N1)*S1 )/(n1-S1)", "float): return n.is_integer() return False def check_4_srd(graph1, graph2): # checks a given pair", "stage\") else: continue # print(S1, \"failed at eigenvalue stage\") else: continue #print(\"P1 or", "[] if k2 < k1: S1 = 2 + k1 - k2 else:", "s2) or (rho2 == s2 and sig2 == r2): # print out parameters", "return(table) # end of function check_4_srd #___________________________________ # start of main graphs =", "+ lam2 - lam1 lam0 = a1 + lam2 if a2 >= 0", "= ( c - d )/2 f = (( n-1 )*(-s) - k0)/(r-s)", "= ( c + d )/2 s1 = ( c - d )/2", "rho1 = N1-P1 rho2 = N2-P2 sig1 = -(S2-b2)/(a2-b2) sig2 = -(S1-b1)/(a1-b1) if", "and sig2 == r2): # print out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\"", "graph1[0] k1 = graph1[1] lam1 = graph1[2] mu1 = graph1[3] # compute r1,", "while lines[ len(lines)-1 ] != ['']: lines.append(graphs.readline().split('\\t') ) count = len(lines)-1 # this", "else: continue #print(\"a2 or lam0 problem\") S1 += 1 return(table) # end of", "= [\"n_i\", \"k_i\", \"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\", \"n\", \"k\", \"lam\", \"mu\", \"r\", \"s\", \"S_i\",", ")/2 f = (( n-1 )*(-s) - k0)/(r-s) # check these parameters first", "- 2*r*r*s - r*r - k0*r + k0*s*s + 2*k0*s < 0 or", "import tabulate import math def is_integer_num(n): if isinstance(n, int): return True if isinstance(n,", "table = [] if k2 < k1: S1 = 2 + k1 -", "list line2 = lines[j] for entry in range(0,4): # converting text to integer", "] != ['']: 
lines.append(graphs.readline().split('\\t') ) count = len(lines)-1 # this is because last", "sig2 == r2): # print out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\"", ")/2 table = [] if k2 < k1: S1 = 2 + k1", "for a1 in range(0, S1): a2 = a1 + lam2 - lam1 lam0", "<reponame>sankeyad/strongly-regular-designs<gh_stars>0 # SRD-onef.py # Reads a single text file of strongly regular graph", "# checks a given pair of srgs n1 = graph1[0] k1 = graph1[1]", "S1 else: bot = 0 top = a1 for b1 in range(bot, top):", "18 # 204 28 2 4 # 205 96 50 40 # 205", "a given pair of srgs n1 = graph1[0] k1 = graph1[1] lam1 =", "or (rho2 == s2 and sig2 == r2): # print out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1,", "= -(S1-b1)/(a1-b1) if (rho1 == r1 and sig1 == s1) or (rho1 ==", "d = math.sqrt(delta) r1 = ( c + d )/2 s1 = (", "r2, s2 c = lam2 - mu2 delta = c**2 + 4*(k2 -", "SRD-onef.py # Reads a single text file of strongly regular graph parameters. #", "-(S1-b1)/(a1-b1) if (rho1 == r1 and sig1 == s1) or (rho1 == s1", "in DGH but need to allow b1 < S1. 
b2 = b1 +", "n1: S2 = k2 + S1 - k1 k0 = k1 + S2", "s1) or (rho1 == s1 and sig1 == r1): # then all is", "line1 = lines[i] # reads the text, separated by spaces, into list line2", "== r1): # then all is good, keep going if (rho2 == r2", "a1+1 top = S1 else: bot = 0 top = a1 for b1", "k1 + S2 for a1 in range(0, S1): a2 = a1 + lam2", "count = len(lines)-1 # this is because last item in list is now", "= lam2 - mu2 delta = c**2 + 4*(k2 - mu2) d =", "to the results table results += newones # Loops through the input file", "s*s - k0*s + k0*r*r + 2*k0*r < 0: # above checks Krein", "= lam0 - mu0 delta = c**2 + 4*(k0 - mu0) d =", "b2 < 0 or n-2*k0+mu0-2 < 0 or n-2*k0+lam0 < 0: continue #any", "\"sig_i\", \"n\", \"k\", \"lam\", \"mu\", \"r\", \"s\", \"S_i\", \"a_i\", \"b_i\", \"N_i\", \"P_i\"])) params.close()", "mu1) d = math.sqrt(delta) r1 = ( c + d )/2 s1 =", "P1 = ( (k1-N1)*S1 )/(n1-S1) # check it's an integer P2 = (", "two graphs from the input file are # the SRGs on the two", "n2 !=S2: # then good P1 = ( (k1-N1)*S1 )/(n1-S1) # check it's", "+ 4*(k2 - mu2) d = math.sqrt(delta) r2 = ( c + d", "= b1 + mu2 c = lam0 - mu0 delta = c**2 +", "- r*r - k0*r + k0*s*s + 2*k0*s < 0 or r*r*s -", "d )/2 s1 = ( c - d )/2 n2 = graph2[0] k2", "40 # 205 68 15 26 # 208 75 30 25 # 208", "elif r*s*s - 2*r*r*s - r*r - k0*r + k0*s*s + 2*k0*s <", "should be a tab-separated text file with one SRG parameter set in the", "= graph2[1] lam2 = graph2[2] mu2 = graph2[3] # compute r2, s2 c", "\",\" \",\" \",\" \",S2,a2, b2, N2, P2]) else: continue # print(S1, \"failed at", "else: continue # print(S1, \"failed at eigenvalue stage\") else: continue #print(\"P1 or P2", "end of function check_4_srd #___________________________________ # start of main graphs = open(infile) params", "k lambda mu' per line, e.g.: # 204 63 22 18 # 204", "= ( (k2-N2)*S2 )/(n2-S2) if is_integer_num(P1) and is_integer_num(P2): rho1 = N1-P1 rho2 =", "is now '' for i in range(0,count): for j in range(i, count): line1", "c - d )/2 
table = [] if k2 < k1: S1 =", "mu2 - mu1 mu0 = b1 + mu2 c = lam0 - mu0", "range(i, count): line1 = lines[i] # reads the text, separated by spaces, into", "file to compare all pairs, including g2=g1. # Uses tabulate to write results", "extract a pair of lines from the text file of srgs i =", "is_integer_num(P2): rho1 = N1-P1 rho2 = N2-P2 sig1 = -(S2-b2)/(a2-b2) sig2 = -(S1-b1)/(a1-b1)", "\"mu\", \"r\", \"s\", \"S_i\", \"a_i\", \"b_i\", \"N_i\", \"P_i\"])) params.close() graphs.close() # print( time.perf_counter()", "b1 in range(bot, top): # b1 < a1 in DGH but need to", "2*r*r*s - r*r - k0*r + k0*s*s + 2*k0*s < 0 or r*r*s", "4*(k1 - mu1) d = math.sqrt(delta) r1 = ( c + d )/2", "= k1 + S2 for a1 in range(0, S1): a2 = a1 +", "infile, outfile = argv from tabulate import tabulate import math def is_integer_num(n): if", "checks Krein parameters on gamma0 continue elif not is_integer_num(f): # checks integrality of", "or n-2*k0+lam0 < 0: continue #any of those would be invalid elif (n-k0-1)*mu0", "graph2[2] mu2 = graph2[3] # compute r2, s2 c = lam2 - mu2", "to allow b1 < S1. b2 = b1 + mu2 - mu1 mu0", "25 # 208 81 24 36 #import time #begin_time = time.perf_counter() from sys", "argv script, infile, outfile = argv from tabulate import tabulate import math def", "a pair of lines from the text file of srgs i = 1", "to write results with a header row. params.write(tabulate(results, headers = [\"n_i\", \"k_i\", \"lam_i\",", "< a1 in DGH but need to allow b1 < S1. 
b2 =", "4*(k2 - mu2) d = math.sqrt(delta) r2 = ( c + d )/2", "# print(S1, \"failed at eigenvalue stage\") else: continue # print(S1, \"failed at eigenvalue", "of srgs i = 1 j = 1 count = 0 lines.append(graphs.readline().split('\\t')) while", "first if b2 == a2 or b2 < 0 or n-2*k0+mu0-2 < 0", "infile should be a tab-separated text file with one SRG parameter set in", "top = a1 for b1 in range(bot, top): # b1 < a1 in", "eigenvalue stage\") else: continue #print(\"P1 or P2 not integer\") else: continue #print(\"N1 or", "graph1[3] # compute r1, s1 c = lam1 - mu1 delta = c**2", "== r2 and sig2 == s2) or (rho2 == s2 and sig2 ==", "input file to compare all pairs, including g2=g1. # Uses tabulate to write", "elif not is_integer_num(f): # checks integrality of eigenvalue multiplicities continue else: # carry", "# this is because last item in list is now '' for i", "write results with a header row. params.write(tabulate(results, headers = [\"n_i\", \"k_i\", \"lam_i\", \"mu_i\",\"rho_i\",", "a2 or b2 < 0 or n-2*k0+mu0-2 < 0 or n-2*k0+lam0 < 0:", "are then appended to the results table results += newones # Loops through", "- d )/2 table = [] if k2 < k1: S1 = 2", "S2 for a1 in range(0, S1): a2 = a1 + lam2 - lam1", "#begin_time = time.perf_counter() from sys import argv script, infile, outfile = argv from", "- mu1 delta = c**2 + 4*(k1 - mu1) d = math.sqrt(delta) r1", "a single text file of strongly regular graph parameters. # Output is feasible", "2*k0*r < 0: # above checks Krein parameters on gamma0 continue elif not", "the input file to compare all pairs, including g2=g1. 
# Uses tabulate to", "\"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\", \"n\", \"k\", \"lam\", \"mu\", \"r\", \"s\", \"S_i\", \"a_i\", \"b_i\", \"N_i\",", "reads the text, separated by spaces, into list line2 = lines[j] for entry", "continue #print(\"a2 or lam0 problem\") S1 += 1 return(table) # end of function", "68 15 26 # 208 75 30 25 # 208 81 24 36", "c + d )/2 s = ( c - d )/2 f =", "k0*r*r + 2*k0*r < 0: # above checks Krein parameters on gamma0 continue", "n1 + n2 while S1 < n1: S2 = k2 + S1 -", "= lam1 - mu1 delta = c**2 + 4*(k1 - mu1) d =", "to integer g1[entry] = int( line1[entry] ) g2[entry] = int( line2[entry] ) newones", "time #begin_time = time.perf_counter() from sys import argv script, infile, outfile = argv", "feasible parameter sets for strongly regular designs admitting # strongly regular decomposition, in", "in range(bot, top): # b1 < a1 in DGH but need to allow", "n2 while S1 < n1: S2 = k2 + S1 - k1 k0", ">= 0 and lam0 < k0: # then carry on if a1 <", "== s2) or (rho2 == s2 and sig2 == r2): # print out", "[0,0,0,0] lines = [] # first, extract a pair of lines from the", "205 96 50 40 # 205 68 15 26 # 208 75 30", "lam0 < k0: # then carry on if a1 < S1*S2/n2: bot =", "d = math.sqrt(delta) r = ( c + d )/2 s = (", "= len(lines)-1 # this is because last item in list is now ''", "d )/2 table = [] if k2 < k1: S1 = 2 +", "because last item in list is now '' for i in range(0,count): for", "graph2[3] # compute r2, s2 c = lam2 - mu2 delta = c**2", "= ( c - d )/2 table = [] if k2 < k1:", "mu2 delta = c**2 + 4*(k2 - mu2) d = math.sqrt(delta) r2 =", "2*r*s*s - s*s - k0*s + k0*r*r + 2*k0*r < 0: # above", "table results += newones # Loops through the input file to compare all", "range(0,count): for j in range(i, count): line1 = lines[i] # reads the text,", "lines = [] # first, extract a pair of lines from the text", "e.g.: # 204 63 22 18 # 204 28 2 4 # 205", "<infile> <outfile> # The infile should be a tab-separated text file with one", "tabulate import math def 
is_integer_num(n): if isinstance(n, int): return True if isinstance(n, float):", "= c**2 + 4*(k0 - mu0) d = math.sqrt(delta) r = ( c", "\"S_i\", \"a_i\", \"b_i\", \"N_i\", \"P_i\"])) params.close() graphs.close() # print( time.perf_counter() - begin_time )", "= graph1[1] lam1 = graph1[2] mu1 = graph1[3] # compute r1, s1 c", "1 count = 0 lines.append(graphs.readline().split('\\t')) while lines[ len(lines)-1 ] != ['']: lines.append(graphs.readline().split('\\t') )", "1 j = 1 count = 0 lines.append(graphs.readline().split('\\t')) while lines[ len(lines)-1 ] !=", "b2 = b1 + mu2 - mu1 mu0 = b1 + mu2 c", "lam2 if a2 >= 0 and lam0 < k0: # then carry on", "# check it's an integer P2 = ( (k2-N2)*S2 )/(n2-S2) if is_integer_num(P1) and", "26 # 208 75 30 25 # 208 81 24 36 #import time", "n-2*k0+lam0 < 0: continue #any of those would be invalid elif (n-k0-1)*mu0 !=", "in range(0,count): for j in range(i, count): line1 = lines[i] # reads the", "set in the form 'n k lambda mu' per line, e.g.: # 204", "0 and lam0 < k0: # then carry on if a1 < S1*S2/n2:", "top): # b1 < a1 in DGH but need to allow b1 <", "< n1: S2 = k2 + S1 - k1 k0 = k1 +", "item in list is now '' for i in range(0,count): for j in", "mu1 mu0 = b1 + mu2 c = lam0 - mu0 delta =", "or b2 < 0 or n-2*k0+mu0-2 < 0 or n-2*k0+lam0 < 0: continue", "50 40 # 205 68 15 26 # 208 75 30 25 #", "good P1 = ( (k1-N1)*S1 )/(n1-S1) # check it's an integer P2 =", "== a2 or b2 < 0 or n-2*k0+mu0-2 < 0 or n-2*k0+lam0 <", "c = lam2 - mu2 delta = c**2 + 4*(k2 - mu2) d", "s = ( c - d )/2 f = (( n-1 )*(-s) -", "line2 = lines[j] for entry in range(0,4): # converting text to integer g1[entry]", "sets for strongly regular designs admitting # strongly regular decomposition, in which two", "36 #import time #begin_time = time.perf_counter() from sys import argv script, infile, outfile", "= graph2[2] mu2 = graph2[3] # compute r2, s2 c = lam2 -", "# then all is good, keep going if (rho2 == r2 and sig2", "b1 < S1. 
b2 = b1 + mu2 - mu1 mu0 = b1", "compute r2, s2 c = lam2 - mu2 delta = c**2 + 4*(k2", "it's an integer P2 = ( (k2-N2)*S2 )/(n2-S2) if is_integer_num(P1) and is_integer_num(P2): rho1", "check_4_srd(g1,g2) # new sets are then appended to the results table results +=", "for srds arising from a pair of srgs g1 = [0,0,0,0] g2 =", "continue elif r*s*s - 2*r*r*s - r*r - k0*r + k0*s*s + 2*k0*s", "check these parameters first if b2 == a2 or b2 < 0 or", "< 0: continue #any of those would be invalid elif (n-k0-1)*mu0 != k0*(k0-lam0-1):", "else: S1 = 2 n = n1 + n2 while S1 < n1:", "- d )/2 n2 = graph2[0] k2 = graph2[1] lam2 = graph2[2] mu2", "== s1) or (rho1 == s1 and sig1 == r1): # then all", "n-1 )*(-s) - k0)/(r-s) # check these parameters first if b2 == a2", "j in range(i, count): line1 = lines[i] # reads the text, separated by", "75 30 25 # 208 81 24 36 #import time #begin_time = time.perf_counter()", "in range(0, S1): a2 = a1 + lam2 - lam1 lam0 = a1", "all pairs, including g2=g1. # Uses tabulate to write results with a header", "DGH but need to allow b1 < S1. b2 = b1 + mu2", "# the SRGs on the two fibres of the SRD. 
# Usage: python3.7", "rho2 = N2-P2 sig1 = -(S2-b2)/(a2-b2) sig2 = -(S1-b1)/(a1-b1) if (rho1 == r1", "line2[entry] ) newones = check_4_srd(g1,g2) # new sets are then appended to the", "in list is now '' for i in range(0,count): for j in range(i,", "c**2 + 4*(k2 - mu2) d = math.sqrt(delta) r2 = ( c +", "k0*(k0-lam0-1): continue elif r*s*s - 2*r*r*s - r*r - k0*r + k0*s*s +", "else: bot = 0 top = a1 for b1 in range(bot, top): #", "r*r - k0*r + k0*s*s + 2*k0*s < 0 or r*r*s - 2*r*s*s", "< 0 or r*r*s - 2*r*s*s - s*s - k0*s + k0*r*r +", "#print(\"N1 or N2 not integer\") else: continue #print(\"a2 or lam0 problem\") S1 +=", "# 208 75 30 25 # 208 81 24 36 #import time #begin_time", "params = open(outfile, 'w') results = [] # will be table of feasible", "those would be invalid elif (n-k0-1)*mu0 != k0*(k0-lam0-1): continue elif r*s*s - 2*r*r*s", "Krein parameters on gamma0 continue elif not is_integer_num(f): # checks integrality of eigenvalue", "is_integer_num(f): # checks integrality of eigenvalue multiplicities continue else: # carry on N1", "sig2 = -(S1-b1)/(a1-b1) if (rho1 == r1 and sig1 == s1) or (rho1", "= argv from tabulate import tabulate import math def is_integer_num(n): if isinstance(n, int):", "s1 c = lam1 - mu1 delta = c**2 + 4*(k1 - mu1)", "of main graphs = open(infile) params = open(outfile, 'w') results = [] #", "range(0,4): # converting text to integer g1[entry] = int( line1[entry] ) g2[entry] =", "\",\" \",\" \",\" \",\" \",S2,a2, b2, N2, P2]) else: continue # print(S1, \"failed", "on the two fibres of the SRD. 
# Usage: python3.7 SRD-onef.py <infile> <outfile>", "+ mu2 c = lam0 - mu0 delta = c**2 + 4*(k0 -", "this is because last item in list is now '' for i in", "continue #print(\"N1 or N2 not integer\") else: continue #print(\"a2 or lam0 problem\") S1", "from sys import argv script, infile, outfile = argv from tabulate import tabulate", "24 36 #import time #begin_time = time.perf_counter() from sys import argv script, infile,", "( (k1-N1)*S1 )/(n1-S1) # check it's an integer P2 = ( (k2-N2)*S2 )/(n2-S2)", "#print(\"a2 or lam0 problem\") S1 += 1 return(table) # end of function check_4_srd", "\"failed at eigenvalue stage\") else: continue # print(S1, \"failed at eigenvalue stage\") else:", "the form 'n k lambda mu' per line, e.g.: # 204 63 22", "(k2-N2)*S2 )/(n2-S2) if is_integer_num(P1) and is_integer_num(P2): rho1 = N1-P1 rho2 = N2-P2 sig1", "= math.sqrt(delta) r = ( c + d )/2 s = ( c", "lambda mu' per line, e.g.: # 204 63 22 18 # 204 28", "r2 and sig2 == s2) or (rho2 == s2 and sig2 == r2):", "Loops through the input file to compare all pairs, including g2=g1. # Uses", "if a2 >= 0 and lam0 < k0: # then carry on if", "s2 = ( c - d )/2 table = [] if k2 <", "# then good P1 = ( (k1-N1)*S1 )/(n1-S1) # check it's an integer", "and sig1 == s1) or (rho1 == s1 and sig1 == r1): #", "count = 0 lines.append(graphs.readline().split('\\t')) while lines[ len(lines)-1 ] != ['']: lines.append(graphs.readline().split('\\t') ) count", "= open(infile) params = open(outfile, 'w') results = [] # will be table", "at eigenvalue stage\") else: continue #print(\"P1 or P2 not integer\") else: continue #print(\"N1", "be table of feasible parameters for srds arising from a pair of srgs", "of eigenvalue multiplicities continue else: # carry on N1 = a2*k1/S2 N2 =", "text file of strongly regular graph parameters. 
# Output is feasible parameter sets", "0: # above checks Krein parameters on gamma0 continue elif not is_integer_num(f): #", "or n-2*k0+mu0-2 < 0 or n-2*k0+lam0 < 0: continue #any of those would", "# end of function check_4_srd #___________________________________ # start of main graphs = open(infile)", "# 205 96 50 40 # 205 68 15 26 # 208 75", "lam0 - mu0 delta = c**2 + 4*(k0 - mu0) d = math.sqrt(delta)", "is_integer_num(N1) and is_integer_num(N2) and n1 != S1 and n2 !=S2: # then good", "\"failed at eigenvalue stage\") else: continue #print(\"P1 or P2 not integer\") else: continue", ")/2 n2 = graph2[0] k2 = graph2[1] lam2 = graph2[2] mu2 = graph2[3]", "S1 += 1 return(table) # end of function check_4_srd #___________________________________ # start of", "the text, separated by spaces, into list line2 = lines[j] for entry in", "bot = 0 top = a1 for b1 in range(bot, top): # b1", "math.sqrt(delta) r1 = ( c + d )/2 s1 = ( c -", "= 0 top = a1 for b1 in range(bot, top): # b1 <", "(rho2 == s2 and sig2 == r2): # print out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1,", "\"n\", \"k\", \"lam\", \"mu\", \"r\", \"s\", \"S_i\", \"a_i\", \"b_i\", \"N_i\", \"P_i\"])) params.close() graphs.close()", "or r*r*s - 2*r*s*s - s*s - k0*s + k0*r*r + 2*k0*r <", "not is_integer_num(f): # checks integrality of eigenvalue multiplicities continue else: # carry on", "parameters on gamma0 continue elif not is_integer_num(f): # checks integrality of eigenvalue multiplicities", "parameters. 
# Output is feasible parameter sets for strongly regular designs admitting #", "= ( c + d )/2 s = ( c - d )/2", "Usage: python3.7 SRD-onef.py <infile> <outfile> # The infile should be a tab-separated text", "a1*k2/S1 if is_integer_num(N1) and is_integer_num(N2) and n1 != S1 and n2 !=S2: #", "text file of srgs i = 1 j = 1 count = 0", "table of feasible parameters for srds arising from a pair of srgs g1", "isinstance(n, int): return True if isinstance(n, float): return n.is_integer() return False def check_4_srd(graph1,", "<outfile> # The infile should be a tab-separated text file with one SRG", "would be invalid elif (n-k0-1)*mu0 != k0*(k0-lam0-1): continue elif r*s*s - 2*r*r*s -", "of srgs g1 = [0,0,0,0] g2 = [0,0,0,0] lines = [] # first,", "sig2 == s2) or (rho2 == s2 and sig2 == r2): # print", "entry in range(0,4): # converting text to integer g1[entry] = int( line1[entry] )", "lines[ len(lines)-1 ] != ['']: lines.append(graphs.readline().split('\\t') ) count = len(lines)-1 # this is", "# then carry on if a1 < S1*S2/n2: bot = a1+1 top =", "< 0 or n-2*k0+mu0-2 < 0 or n-2*k0+lam0 < 0: continue #any of", "def check_4_srd(graph1, graph2): # checks a given pair of srgs n1 = graph1[0]", "# Output is feasible parameter sets for strongly regular designs admitting # strongly", "be a tab-separated text file with one SRG parameter set in the form", "graph1[2] mu1 = graph1[3] # compute r1, s1 c = lam1 - mu1", "+ k0*s*s + 2*k0*s < 0 or r*r*s - 2*r*s*s - s*s -", "python3.7 SRD-onef.py <infile> <outfile> # The infile should be a tab-separated text file", "c + d )/2 s2 = ( c - d )/2 table =", "0: continue #any of those would be invalid elif (n-k0-1)*mu0 != k0*(k0-lam0-1): continue", "k2 < k1: S1 = 2 + k1 - k2 else: S1 =", "or P2 not integer\") else: continue #print(\"N1 or N2 not integer\") else: continue", "= lines[j] for entry in range(0,4): # converting text to integer g1[entry] =", "continue #any of those would be invalid elif (n-k0-1)*mu0 != k0*(k0-lam0-1): continue elif", 
"is good, keep going if (rho2 == r2 and sig2 == s2) or", "SRD. # Usage: python3.7 SRD-onef.py <infile> <outfile> # The infile should be a", "and lam0 < k0: # then carry on if a1 < S1*S2/n2: bot", "f = (( n-1 )*(-s) - k0)/(r-s) # check these parameters first if", "204 28 2 4 # 205 96 50 40 # 205 68 15", "not integer\") else: continue #print(\"N1 or N2 not integer\") else: continue #print(\"a2 or", "math.sqrt(delta) r2 = ( c + d )/2 s2 = ( c -", "81 24 36 #import time #begin_time = time.perf_counter() from sys import argv script,", "k2 else: S1 = 2 n = n1 + n2 while S1 <", "by spaces, into list line2 = lines[j] for entry in range(0,4): # converting", "(rho1 == r1 and sig1 == s1) or (rho1 == s1 and sig1", "for i in range(0,count): for j in range(i, count): line1 = lines[i] #", "= c**2 + 4*(k2 - mu2) d = math.sqrt(delta) r2 = ( c", "!= ['']: lines.append(graphs.readline().split('\\t') ) count = len(lines)-1 # this is because last item", "list is now '' for i in range(0,count): for j in range(i, count):", "form 'n k lambda mu' per line, e.g.: # 204 63 22 18", "if a1 < S1*S2/n2: bot = a1+1 top = S1 else: bot =", "keep going if (rho2 == r2 and sig2 == s2) or (rho2 ==", "if is_integer_num(P1) and is_integer_num(P2): rho1 = N1-P1 rho2 = N2-P2 sig1 = -(S2-b2)/(a2-b2)", "and sig1 == r1): # then all is good, keep going if (rho2", "# 204 63 22 18 # 204 28 2 4 # 205 96", "the SRGs on the two fibres of the SRD. 
# Usage: python3.7 SRD-onef.py", "True if isinstance(n, float): return n.is_integer() return False def check_4_srd(graph1, graph2): # checks", "i = 1 j = 1 count = 0 lines.append(graphs.readline().split('\\t')) while lines[ len(lines)-1", "P2]) else: continue # print(S1, \"failed at eigenvalue stage\") else: continue # print(S1,", "[] # first, extract a pair of lines from the text file of", "- k0*r + k0*s*s + 2*k0*s < 0 or r*r*s - 2*r*s*s -", "b1, N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\" \",\" \",\" \",\" \",S2,a2, b2, N2, P2])", "range(bot, top): # b1 < a1 in DGH but need to allow b1", "feasible parameters for srds arising from a pair of srgs g1 = [0,0,0,0]", ")*(-s) - k0)/(r-s) # check these parameters first if b2 == a2 or", "eigenvalue multiplicities continue else: # carry on N1 = a2*k1/S2 N2 = a1*k2/S1", "fibres of the SRD. # Usage: python3.7 SRD-onef.py <infile> <outfile> # The infile", "line, e.g.: # 204 63 22 18 # 204 28 2 4 #", "- mu1) d = math.sqrt(delta) r1 = ( c + d )/2 s1", "[] # will be table of feasible parameters for srds arising from a", "math def is_integer_num(n): if isinstance(n, int): return True if isinstance(n, float): return n.is_integer()", "srgs i = 1 j = 1 count = 0 lines.append(graphs.readline().split('\\t')) while lines[", "script, infile, outfile = argv from tabulate import tabulate import math def is_integer_num(n):", "k0*s + k0*r*r + 2*k0*r < 0: # above checks Krein parameters on", "in the form 'n k lambda mu' per line, e.g.: # 204 63", "per line, e.g.: # 204 63 22 18 # 204 28 2 4", "S1 < n1: S2 = k2 + S1 - k1 k0 = k1", "# reads the text, separated by spaces, into list line2 = lines[j] for", "table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\" \",\" \",\" \",\" \",S2,a2, b2, N2, P2]) else: continue #", "= N2-P2 sig1 = -(S2-b2)/(a2-b2) sig2 = -(S1-b1)/(a1-b1) if (rho1 == r1 and", "= graph2[3] # compute r2, s2 c = lam2 - mu2 delta =", "S1): a2 = a1 + lam2 - lam1 lam0 = a1 + lam2", "sig1 = -(S2-b2)/(a2-b2) sig2 = 
-(S1-b1)/(a1-b1) if (rho1 == r1 and sig1 ==", "parameters for srds arising from a pair of srgs g1 = [0,0,0,0] g2", "= c**2 + 4*(k1 - mu1) d = math.sqrt(delta) r1 = ( c", "+ 4*(k1 - mu1) d = math.sqrt(delta) r1 = ( c + d", "# Reads a single text file of strongly regular graph parameters. # Output", "which two graphs from the input file are # the SRGs on the", "through the input file to compare all pairs, including g2=g1. # Uses tabulate", "lam0 problem\") S1 += 1 return(table) # end of function check_4_srd #___________________________________ #", "s2 c = lam2 - mu2 delta = c**2 + 4*(k2 - mu2)", "208 81 24 36 #import time #begin_time = time.perf_counter() from sys import argv", "SRG parameter set in the form 'n k lambda mu' per line, e.g.:", "d = math.sqrt(delta) r2 = ( c + d )/2 s2 = (", "if b2 == a2 or b2 < 0 or n-2*k0+mu0-2 < 0 or", "count): line1 = lines[i] # reads the text, separated by spaces, into list", "205 68 15 26 # 208 75 30 25 # 208 81 24", "then appended to the results table results += newones # Loops through the", "regular decomposition, in which two graphs from the input file are # the", "text file with one SRG parameter set in the form 'n k lambda", "if isinstance(n, float): return n.is_integer() return False def check_4_srd(graph1, graph2): # checks a", "- k1 k0 = k1 + S2 for a1 in range(0, S1): a2", "a1 for b1 in range(bot, top): # b1 < a1 in DGH but", "#any of those would be invalid elif (n-k0-1)*mu0 != k0*(k0-lam0-1): continue elif r*s*s", "[\"n_i\", \"k_i\", \"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\", \"n\", \"k\", \"lam\", \"mu\", \"r\", \"s\", \"S_i\", \"a_i\",", "+ 2*k0*s < 0 or r*r*s - 2*r*s*s - s*s - k0*s +", "or N2 not integer\") else: continue #print(\"a2 or lam0 problem\") S1 += 1", "parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\" \",\" \",\" \",\" \",S2,a2, b2,", "# strongly regular decomposition, in which two graphs from the input file are", 
"main graphs = open(infile) params = open(outfile, 'w') results = [] # will", "b1 < a1 in DGH but need to allow b1 < S1. b2", "a tab-separated text file with one SRG parameter set in the form 'n", "return n.is_integer() return False def check_4_srd(graph1, graph2): # checks a given pair of", "to compare all pairs, including g2=g1. # Uses tabulate to write results with", "22 18 # 204 28 2 4 # 205 96 50 40 #", "import argv script, infile, outfile = argv from tabulate import tabulate import math", "pair of srgs n1 = graph1[0] k1 = graph1[1] lam1 = graph1[2] mu1", "- mu1 mu0 = b1 + mu2 c = lam0 - mu0 delta", "96 50 40 # 205 68 15 26 # 208 75 30 25", "delta = c**2 + 4*(k0 - mu0) d = math.sqrt(delta) r = (", "mu0 = b1 + mu2 c = lam0 - mu0 delta = c**2", "== r1 and sig1 == s1) or (rho1 == s1 and sig1 ==", "lam2 - mu2 delta = c**2 + 4*(k2 - mu2) d = math.sqrt(delta)", "start of main graphs = open(infile) params = open(outfile, 'w') results = []", "r2): # print out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\" \",\"", "# compute r2, s2 c = lam2 - mu2 delta = c**2 +", "== r2): # print out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\"", "15 26 # 208 75 30 25 # 208 81 24 36 #import", "+ k0*r*r + 2*k0*r < 0: # above checks Krein parameters on gamma0", "r = ( c + d )/2 s = ( c - d", "import math def is_integer_num(n): if isinstance(n, int): return True if isinstance(n, float): return", "(( n-1 )*(-s) - k0)/(r-s) # check these parameters first if b2 ==", "len(lines)-1 # this is because last item in list is now '' for", "a1 < S1*S2/n2: bot = a1+1 top = S1 else: bot = 0", "on if a1 < S1*S2/n2: bot = a1+1 top = S1 else: bot", "k1 = graph1[1] lam1 = graph1[2] mu1 = graph1[3] # compute r1, s1", "from tabulate import tabulate import math def is_integer_num(n): if isinstance(n, int): return True", 
"is_integer_num(n): if isinstance(n, int): return True if isinstance(n, float): return n.is_integer() return False", "+ 4*(k0 - mu0) d = math.sqrt(delta) r = ( c + d", "then good P1 = ( (k1-N1)*S1 )/(n1-S1) # check it's an integer P2", "outfile = argv from tabulate import tabulate import math def is_integer_num(n): if isinstance(n,", "mu1 delta = c**2 + 4*(k1 - mu1) d = math.sqrt(delta) r1 =", "is_integer_num(N2) and n1 != S1 and n2 !=S2: # then good P1 =", "checks integrality of eigenvalue multiplicities continue else: # carry on N1 = a2*k1/S2", "k0)/(r-s) # check these parameters first if b2 == a2 or b2 <", "from a pair of srgs g1 = [0,0,0,0] g2 = [0,0,0,0] lines =", "decomposition, in which two graphs from the input file are # the SRGs", "= lines[i] # reads the text, separated by spaces, into list line2 =", "'n k lambda mu' per line, e.g.: # 204 63 22 18 #", "2*k0*s < 0 or r*r*s - 2*r*s*s - s*s - k0*s + k0*r*r", "# print(S1, \"failed at eigenvalue stage\") else: continue #print(\"P1 or P2 not integer\")", "r1 = ( c + d )/2 s1 = ( c - d", "- d )/2 f = (( n-1 )*(-s) - k0)/(r-s) # check these", "and sig2 == s2) or (rho2 == s2 and sig2 == r2): #", "= 1 j = 1 count = 0 lines.append(graphs.readline().split('\\t')) while lines[ len(lines)-1 ]", "+ d )/2 s2 = ( c - d )/2 table = []", "c + d )/2 s1 = ( c - d )/2 n2 =", "headers = [\"n_i\", \"k_i\", \"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\", \"n\", \"k\", \"lam\", \"mu\", \"r\", \"s\",", "delta = c**2 + 4*(k2 - mu2) d = math.sqrt(delta) r2 = (", "+ 2*k0*r < 0: # above checks Krein parameters on gamma0 continue elif", "-(S2-b2)/(a2-b2) sig2 = -(S1-b1)/(a1-b1) if (rho1 == r1 and sig1 == s1) or", "argv from tabulate import tabulate import math def is_integer_num(n): if isinstance(n, int): return", "= b1 + mu2 - mu1 mu0 = b1 + mu2 c =", "g1 = [0,0,0,0] g2 = [0,0,0,0] lines = [] # first, extract a", "newones = check_4_srd(g1,g2) # new sets are then appended to the results table", "+ d )/2 s1 = ( c - d )/2 n2 = graph2[0]", "n1 
!= S1 and n2 !=S2: # then good P1 = ( (k1-N1)*S1", "P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\" \",\" \",\" \",\" \",S2,a2, b2, N2, P2]) else: continue", "- mu0) d = math.sqrt(delta) r = ( c + d )/2 s", "pair of srgs g1 = [0,0,0,0] g2 = [0,0,0,0] lines = [] #", "+ d )/2 s = ( c - d )/2 f = ((", "are # the SRGs on the two fibres of the SRD. # Usage:", "lam1 - mu1 delta = c**2 + 4*(k1 - mu1) d = math.sqrt(delta)", "parameters first if b2 == a2 or b2 < 0 or n-2*k0+mu0-2 <", ") g2[entry] = int( line2[entry] ) newones = check_4_srd(g1,g2) # new sets are", "\",\" \",\" \",\" \",\" \",\" \",S2,a2, b2, N2, P2]) else: continue # print(S1,", "first, extract a pair of lines from the text file of srgs i", "# above checks Krein parameters on gamma0 continue elif not is_integer_num(f): # checks", "graph parameters. # Output is feasible parameter sets for strongly regular designs admitting", "regular designs admitting # strongly regular decomposition, in which two graphs from the", "now '' for i in range(0,count): for j in range(i, count): line1 =", "elif (n-k0-1)*mu0 != k0*(k0-lam0-1): continue elif r*s*s - 2*r*r*s - r*r - k0*r", "204 63 22 18 # 204 28 2 4 # 205 96 50", "( c + d )/2 s2 = ( c - d )/2 table", "c - d )/2 f = (( n-1 )*(-s) - k0)/(r-s) # check", "print out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\" \",\" \",\" \",\"", "S1 - k1 k0 = k1 + S2 for a1 in range(0, S1):", "integer P2 = ( (k2-N2)*S2 )/(n2-S2) if is_integer_num(P1) and is_integer_num(P2): rho1 = N1-P1", ") newones = check_4_srd(g1,g2) # new sets are then appended to the results", "compare all pairs, including g2=g1. # Uses tabulate to write results with a", "into list line2 = lines[j] for entry in range(0,4): # converting text to", "1 return(table) # end of function check_4_srd #___________________________________ # start of main graphs", "SRGs on the two fibres of the SRD. 
# Usage: python3.7 SRD-onef.py <infile>", "( c + d )/2 s = ( c - d )/2 f", "(k1-N1)*S1 )/(n1-S1) # check it's an integer P2 = ( (k2-N2)*S2 )/(n2-S2) if", "and n1 != S1 and n2 !=S2: # then good P1 = (", "= time.perf_counter() from sys import argv script, infile, outfile = argv from tabulate", "N2 not integer\") else: continue #print(\"a2 or lam0 problem\") S1 += 1 return(table)", "r*s*s - 2*r*r*s - r*r - k0*r + k0*s*s + 2*k0*s < 0", "one SRG parameter set in the form 'n k lambda mu' per line,", "N2-P2 sig1 = -(S2-b2)/(a2-b2) sig2 = -(S1-b1)/(a1-b1) if (rho1 == r1 and sig1", "arising from a pair of srgs g1 = [0,0,0,0] g2 = [0,0,0,0] lines", "n = n1 + n2 while S1 < n1: S2 = k2 +", "< S1. b2 = b1 + mu2 - mu1 mu0 = b1 +", "Reads a single text file of strongly regular graph parameters. # Output is", "SRD-onef.py <infile> <outfile> # The infile should be a tab-separated text file with", "lines.append(graphs.readline().split('\\t') ) count = len(lines)-1 # this is because last item in list", "+= newones # Loops through the input file to compare all pairs, including", "given pair of srgs n1 = graph1[0] k1 = graph1[1] lam1 = graph1[2]", "continue # print(S1, \"failed at eigenvalue stage\") else: continue #print(\"P1 or P2 not", "lines.append(graphs.readline().split('\\t')) while lines[ len(lines)-1 ] != ['']: lines.append(graphs.readline().split('\\t') ) count = len(lines)-1 #", "b2, N2, P2]) else: continue # print(S1, \"failed at eigenvalue stage\") else: continue", "good, keep going if (rho2 == r2 and sig2 == s2) or (rho2", "( c - d )/2 n2 = graph2[0] k2 = graph2[1] lam2 =", "single text file of strongly regular graph parameters. 
# Output is feasible parameter", "S1*S2/n2: bot = a1+1 top = S1 else: bot = 0 top =", "False def check_4_srd(graph1, graph2): # checks a given pair of srgs n1 =", "+ lam2 if a2 >= 0 and lam0 < k0: # then carry", "designs admitting # strongly regular decomposition, in which two graphs from the input", "a pair of srgs g1 = [0,0,0,0] g2 = [0,0,0,0] lines = []", "table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\" \",\" \",\" \",\" \",S2,a2, b2, N2,", "k0*r + k0*s*s + 2*k0*s < 0 or r*r*s - 2*r*s*s - s*s", "+ S1 - k1 k0 = k1 + S2 for a1 in range(0,", "[0,0,0,0] g2 = [0,0,0,0] lines = [] # first, extract a pair of", "input file are # the SRGs on the two fibres of the SRD.", "len(lines)-1 ] != ['']: lines.append(graphs.readline().split('\\t') ) count = len(lines)-1 # this is because", "is_integer_num(P1) and is_integer_num(P2): rho1 = N1-P1 rho2 = N2-P2 sig1 = -(S2-b2)/(a2-b2) sig2", "sets are then appended to the results table results += newones # Loops", "multiplicities continue else: # carry on N1 = a2*k1/S2 N2 = a1*k2/S1 if", "k1: S1 = 2 + k1 - k2 else: S1 = 2 n", "open(infile) params = open(outfile, 'w') results = [] # will be table of", "\"mu_i\",\"rho_i\", \"sig_i\", \"n\", \"k\", \"lam\", \"mu\", \"r\", \"s\", \"S_i\", \"a_i\", \"b_i\", \"N_i\", \"P_i\"]))", "b1 + mu2 c = lam0 - mu0 delta = c**2 + 4*(k0", "with a header row. params.write(tabulate(results, headers = [\"n_i\", \"k_i\", \"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\", \"n\",", "graphs from the input file are # the SRGs on the two fibres", "= n1 + n2 while S1 < n1: S2 = k2 + S1", "pair of lines from the text file of srgs i = 1 j", "Uses tabulate to write results with a header row. 
params.write(tabulate(results, headers = [\"n_i\",", "of those would be invalid elif (n-k0-1)*mu0 != k0*(k0-lam0-1): continue elif r*s*s -", "gamma0 continue elif not is_integer_num(f): # checks integrality of eigenvalue multiplicities continue else:", "integer g1[entry] = int( line1[entry] ) g2[entry] = int( line2[entry] ) newones =", "need to allow b1 < S1. b2 = b1 + mu2 - mu1", "mu1 = graph1[3] # compute r1, s1 c = lam1 - mu1 delta", "but need to allow b1 < S1. b2 = b1 + mu2 -", "carry on N1 = a2*k1/S2 N2 = a1*k2/S1 if is_integer_num(N1) and is_integer_num(N2) and", "63 22 18 # 204 28 2 4 # 205 96 50 40", "continue else: # carry on N1 = a2*k1/S2 N2 = a1*k2/S1 if is_integer_num(N1)", "mu2) d = math.sqrt(delta) r2 = ( c + d )/2 s2 =", "# 208 81 24 36 #import time #begin_time = time.perf_counter() from sys import", "# 205 68 15 26 # 208 75 30 25 # 208 81", "tabulate to write results with a header row. params.write(tabulate(results, headers = [\"n_i\", \"k_i\",", "of strongly regular graph parameters. 
# Output is feasible parameter sets for strongly", "# carry on N1 = a2*k1/S2 N2 = a1*k2/S1 if is_integer_num(N1) and is_integer_num(N2)", "graph1[1] lam1 = graph1[2] mu1 = graph1[3] # compute r1, s1 c =", "k2 = graph2[1] lam2 = graph2[2] mu2 = graph2[3] # compute r2, s2", "4 # 205 96 50 40 # 205 68 15 26 # 208", "results = [] # will be table of feasible parameters for srds arising", "= (( n-1 )*(-s) - k0)/(r-s) # check these parameters first if b2", "invalid elif (n-k0-1)*mu0 != k0*(k0-lam0-1): continue elif r*s*s - 2*r*r*s - r*r -", "else: continue # print(S1, \"failed at eigenvalue stage\") else: continue # print(S1, \"failed", "the text file of srgs i = 1 j = 1 count =", "= 1 count = 0 lines.append(graphs.readline().split('\\t')) while lines[ len(lines)-1 ] != ['']: lines.append(graphs.readline().split('\\t')", "d )/2 s = ( c - d )/2 f = (( n-1", "not integer\") else: continue #print(\"a2 or lam0 problem\") S1 += 1 return(table) #", "admitting # strongly regular decomposition, in which two graphs from the input file", "= S1 else: bot = 0 top = a1 for b1 in range(bot,", "is feasible parameter sets for strongly regular designs admitting # strongly regular decomposition,", "b1 + mu2 - mu1 mu0 = b1 + mu2 c = lam0", "r2 = ( c + d )/2 s2 = ( c - d", "# new sets are then appended to the results table results += newones", "( (k2-N2)*S2 )/(n2-S2) if is_integer_num(P1) and is_integer_num(P2): rho1 = N1-P1 rho2 = N2-P2", "time.perf_counter() from sys import argv script, infile, outfile = argv from tabulate import", "c = lam1 - mu1 delta = c**2 + 4*(k1 - mu1) d", "file of strongly regular graph parameters. 
# Output is feasible parameter sets for", "= N1-P1 rho2 = N2-P2 sig1 = -(S2-b2)/(a2-b2) sig2 = -(S1-b1)/(a1-b1) if (rho1", "i in range(0,count): for j in range(i, count): line1 = lines[i] # reads", "integer\") else: continue #print(\"N1 or N2 not integer\") else: continue #print(\"a2 or lam0", "if is_integer_num(N1) and is_integer_num(N2) and n1 != S1 and n2 !=S2: # then", "if k2 < k1: S1 = 2 + k1 - k2 else: S1", "a2 = a1 + lam2 - lam1 lam0 = a1 + lam2 if", "= [] # first, extract a pair of lines from the text file", "\",\" \",\" \",S2,a2, b2, N2, P2]) else: continue # print(S1, \"failed at eigenvalue", "n-2*k0+mu0-2 < 0 or n-2*k0+lam0 < 0: continue #any of those would be", "regular graph parameters. # Output is feasible parameter sets for strongly regular designs", "0 top = a1 for b1 in range(bot, top): # b1 < a1", "'w') results = [] # will be table of feasible parameters for srds", "text to integer g1[entry] = int( line1[entry] ) g2[entry] = int( line2[entry] )", "continue # print(S1, \"failed at eigenvalue stage\") else: continue # print(S1, \"failed at", "N1 = a2*k1/S2 N2 = a1*k2/S1 if is_integer_num(N1) and is_integer_num(N2) and n1 !=", "0 or n-2*k0+lam0 < 0: continue #any of those would be invalid elif", "j = 1 count = 0 lines.append(graphs.readline().split('\\t')) while lines[ len(lines)-1 ] != ['']:", ") count = len(lines)-1 # this is because last item in list is", "4*(k0 - mu0) d = math.sqrt(delta) r = ( c + d )/2", "c - d )/2 n2 = graph2[0] k2 = graph2[1] lam2 = graph2[2]", "else: continue #print(\"N1 or N2 not integer\") else: continue #print(\"a2 or lam0 problem\")", "allow b1 < S1. b2 = b1 + mu2 - mu1 mu0 =", "== s1 and sig1 == r1): # then all is good, keep going", "lines[i] # reads the text, separated by spaces, into list line2 = lines[j]", "and is_integer_num(P2): rho1 = N1-P1 rho2 = N2-P2 sig1 = -(S2-b2)/(a2-b2) sig2 =", "# check these parameters first if b2 == a2 or b2 < 0", "file are # the SRGs on the two fibres of the SRD. 
#", "2 4 # 205 96 50 40 # 205 68 15 26 #", "- lam1 lam0 = a1 + lam2 if a2 >= 0 and lam0", "lines from the text file of srgs i = 1 j = 1", "< S1*S2/n2: bot = a1+1 top = S1 else: bot = 0 top", "then carry on if a1 < S1*S2/n2: bot = a1+1 top = S1", "30 25 # 208 81 24 36 #import time #begin_time = time.perf_counter() from", "= a1 for b1 in range(bot, top): # b1 < a1 in DGH", "def is_integer_num(n): if isinstance(n, int): return True if isinstance(n, float): return n.is_integer() return", "int( line2[entry] ) newones = check_4_srd(g1,g2) # new sets are then appended to", "28 2 4 # 205 96 50 40 # 205 68 15 26", "text, separated by spaces, into list line2 = lines[j] for entry in range(0,4):", "math.sqrt(delta) r = ( c + d )/2 s = ( c -", "all is good, keep going if (rho2 == r2 and sig2 == s2)", "= a1 + lam2 - lam1 lam0 = a1 + lam2 if a2", "( c - d )/2 f = (( n-1 )*(-s) - k0)/(r-s) #", "return True if isinstance(n, float): return n.is_integer() return False def check_4_srd(graph1, graph2): #", ")/2 s2 = ( c - d )/2 table = [] if k2", "< k0: # then carry on if a1 < S1*S2/n2: bot = a1+1", "0 or n-2*k0+mu0-2 < 0 or n-2*k0+lam0 < 0: continue #any of those", "lam2 - lam1 lam0 = a1 + lam2 if a2 >= 0 and", "0 or r*r*s - 2*r*s*s - s*s - k0*s + k0*r*r + 2*k0*r", "d )/2 f = (( n-1 )*(-s) - k0)/(r-s) # check these parameters", "results += newones # Loops through the input file to compare all pairs,", "params.write(tabulate(results, headers = [\"n_i\", \"k_i\", \"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\", \"n\", \"k\", \"lam\", \"mu\", \"r\",", "row. params.write(tabulate(results, headers = [\"n_i\", \"k_i\", \"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\", \"n\", \"k\", \"lam\", \"mu\",", "= math.sqrt(delta) r2 = ( c + d )/2 s2 = ( c", "these parameters first if b2 == a2 or b2 < 0 or n-2*k0+mu0-2", "be invalid elif (n-k0-1)*mu0 != k0*(k0-lam0-1): continue elif r*s*s - 2*r*r*s - r*r", "pairs, including g2=g1. 
# Uses tabulate to write results with a header row.", "of feasible parameters for srds arising from a pair of srgs g1 =", "# Uses tabulate to write results with a header row. params.write(tabulate(results, headers =", "P2 not integer\") else: continue #print(\"N1 or N2 not integer\") else: continue #print(\"a2", "if (rho1 == r1 and sig1 == s1) or (rho1 == s1 and", "= check_4_srd(g1,g2) # new sets are then appended to the results table results", "header row. params.write(tabulate(results, headers = [\"n_i\", \"k_i\", \"lam_i\", \"mu_i\",\"rho_i\", \"sig_i\", \"n\", \"k\", \"lam\",", "s1 = ( c - d )/2 n2 = graph2[0] k2 = graph2[1]", "- k0*s + k0*r*r + 2*k0*r < 0: # above checks Krein parameters", "208 75 30 25 # 208 81 24 36 #import time #begin_time =", "+ n2 while S1 < n1: S2 = k2 + S1 - k1", "\"lam\", \"mu\", \"r\", \"s\", \"S_i\", \"a_i\", \"b_i\", \"N_i\", \"P_i\"])) params.close() graphs.close() # print(", "= a1+1 top = S1 else: bot = 0 top = a1 for", "N2 = a1*k2/S1 if is_integer_num(N1) and is_integer_num(N2) and n1 != S1 and n2", "#import time #begin_time = time.perf_counter() from sys import argv script, infile, outfile =", "The infile should be a tab-separated text file with one SRG parameter set", "or (rho1 == s1 and sig1 == r1): # then all is good,", "( c + d )/2 s1 = ( c - d )/2 n2", "- k0)/(r-s) # check these parameters first if b2 == a2 or b2", "parameter set in the form 'n k lambda mu' per line, e.g.: #", "# first, extract a pair of lines from the text file of srgs", "= ( c + d )/2 s2 = ( c - d )/2", "d )/2 s2 = ( c - d )/2 table = [] if", "a1 + lam2 - lam1 lam0 = a1 + lam2 if a2 >=", "a2*k1/S2 N2 = a1*k2/S1 if is_integer_num(N1) and is_integer_num(N2) and n1 != S1 and", "strongly regular graph parameters. # Output is feasible parameter sets for strongly regular", "of the SRD. 
# Usage: python3.7 SRD-onef.py <infile> <outfile> # The infile should", "function check_4_srd #___________________________________ # start of main graphs = open(infile) params = open(outfile,", ")/2 s = ( c - d )/2 f = (( n-1 )*(-s)", "k1 - k2 else: S1 = 2 n = n1 + n2 while", "n1 = graph1[0] k1 = graph1[1] lam1 = graph1[2] mu1 = graph1[3] #", "S1 and n2 !=S2: # then good P1 = ( (k1-N1)*S1 )/(n1-S1) #", "separated by spaces, into list line2 = lines[j] for entry in range(0,4): #", "- s*s - k0*s + k0*r*r + 2*k0*r < 0: # above checks", "# checks integrality of eigenvalue multiplicities continue else: # carry on N1 =", "the input file are # the SRGs on the two fibres of the", "= graph1[3] # compute r1, s1 c = lam1 - mu1 delta =", "# will be table of feasible parameters for srds arising from a pair", "then all is good, keep going if (rho2 == r2 and sig2 ==", "return False def check_4_srd(graph1, graph2): # checks a given pair of srgs n1", "integrality of eigenvalue multiplicities continue else: # carry on N1 = a2*k1/S2 N2", "delta = c**2 + 4*(k1 - mu1) d = math.sqrt(delta) r1 = (", "tabulate import tabulate import math def is_integer_num(n): if isinstance(n, int): return True if", "k0: # then carry on if a1 < S1*S2/n2: bot = a1+1 top", "+ S2 for a1 in range(0, S1): a2 = a1 + lam2 -", "results table results += newones # Loops through the input file to compare", "a1 in range(0, S1): a2 = a1 + lam2 - lam1 lam0 =", "lines[j] for entry in range(0,4): # converting text to integer g1[entry] = int(", "if isinstance(n, int): return True if isinstance(n, float): return n.is_integer() return False def", "srds arising from a pair of srgs g1 = [0,0,0,0] g2 = [0,0,0,0]", "print(S1, \"failed at eigenvalue stage\") else: continue # print(S1, \"failed at eigenvalue stage\")", "converting text to integer g1[entry] = int( line1[entry] ) g2[entry] = int( line2[entry]", "g2=g1. # Uses tabulate to write results with a header row. 
params.write(tabulate(results, headers", "appended to the results table results += newones # Loops through the input", "bot = a1+1 top = S1 else: bot = 0 top = a1", "#___________________________________ # start of main graphs = open(infile) params = open(outfile, 'w') results", "check_4_srd #___________________________________ # start of main graphs = open(infile) params = open(outfile, 'w')", "of lines from the text file of srgs i = 1 j =", "print(S1, \"failed at eigenvalue stage\") else: continue #print(\"P1 or P2 not integer\") else:", "stage\") else: continue #print(\"P1 or P2 not integer\") else: continue #print(\"N1 or N2", "for strongly regular designs admitting # strongly regular decomposition, in which two graphs", "= open(outfile, 'w') results = [] # will be table of feasible parameters", "lam0 = a1 + lam2 if a2 >= 0 and lam0 < k0:", "N1-P1 rho2 = N2-P2 sig1 = -(S2-b2)/(a2-b2) sig2 = -(S1-b1)/(a1-b1) if (rho1 ==", "the two fibres of the SRD. # Usage: python3.7 SRD-onef.py <infile> <outfile> #", "tab-separated text file with one SRG parameter set in the form 'n k", "N2, P2]) else: continue # print(S1, \"failed at eigenvalue stage\") else: continue #", "mu0 delta = c**2 + 4*(k0 - mu0) d = math.sqrt(delta) r =", "carry on if a1 < S1*S2/n2: bot = a1+1 top = S1 else:", "N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\" \",\" \",\" \",\" \",S2,a2, b2, N2, P2]) else:", "else: continue #print(\"P1 or P2 not integer\") else: continue #print(\"N1 or N2 not", "for b1 in range(bot, top): # b1 < a1 in DGH but need", "# SRD-onef.py # Reads a single text file of strongly regular graph parameters.", "'' for i in range(0,count): for j in range(i, count): line1 = lines[i]", "r*r*s - 2*r*s*s - s*s - k0*s + k0*r*r + 2*k0*r < 0:", "continue elif not is_integer_num(f): # checks integrality of eigenvalue multiplicities continue else: #", "the results table results += newones # Loops through the input file to", "= int( line1[entry] ) g2[entry] = int( line2[entry] ) 
newones = check_4_srd(g1,g2) #", "\"k\", \"lam\", \"mu\", \"r\", \"s\", \"S_i\", \"a_i\", \"b_i\", \"N_i\", \"P_i\"])) params.close() graphs.close() #", "g1[entry] = int( line1[entry] ) g2[entry] = int( line2[entry] ) newones = check_4_srd(g1,g2)", "= [0,0,0,0] g2 = [0,0,0,0] lines = [] # first, extract a pair", "k0 = k1 + S2 for a1 in range(0, S1): a2 = a1", "line1[entry] ) g2[entry] = int( line2[entry] ) newones = check_4_srd(g1,g2) # new sets", "\"s\", \"S_i\", \"a_i\", \"b_i\", \"N_i\", \"P_i\"])) params.close() graphs.close() # print( time.perf_counter() - begin_time", "file with one SRG parameter set in the form 'n k lambda mu'", "# print out parameters table.append([n1,k1,lam1,mu1,rho1,sig1,n,k0,lam0,mu0,r,s,S1,a1, b1, N1, P1]) table.append([n2,k2,lam2,mu2,rho2,sig2,\" \",\" \",\" \",\" \",\"", "above checks Krein parameters on gamma0 continue elif not is_integer_num(f): # checks integrality", ")/(n1-S1) # check it's an integer P2 = ( (k2-N2)*S2 )/(n2-S2) if is_integer_num(P1)", "P2 = ( (k2-N2)*S2 )/(n2-S2) if is_integer_num(P1) and is_integer_num(P2): rho1 = N1-P1 rho2", "file of srgs i = 1 j = 1 count = 0 lines.append(graphs.readline().split('\\t'))", "checks a given pair of srgs n1 = graph1[0] k1 = graph1[1] lam1", "check it's an integer P2 = ( (k2-N2)*S2 )/(n2-S2) if is_integer_num(P1) and is_integer_num(P2):", "for j in range(i, count): line1 = lines[i] # reads the text, separated", "of srgs n1 = graph1[0] k1 = graph1[1] lam1 = graph1[2] mu1 =", "= 2 n = n1 + n2 while S1 < n1: S2 =", "r1): # then all is good, keep going if (rho2 == r2 and", "n.is_integer() return False def check_4_srd(graph1, graph2): # checks a given pair of srgs", "!= k0*(k0-lam0-1): continue elif r*s*s - 2*r*r*s - r*r - k0*r + k0*s*s", "problem\") S1 += 1 return(table) # end of function check_4_srd #___________________________________ # start", "integer\") else: continue #print(\"a2 or lam0 problem\") S1 += 1 return(table) # end", "a1 + lam2 if a2 >= 0 and lam0 < k0: # then", "from 
the input file are # the SRGs on the two fibres of", "(rho2 == r2 and sig2 == s2) or (rho2 == s2 and sig2", "or lam0 problem\") S1 += 1 return(table) # end of function check_4_srd #___________________________________", "graph2[0] k2 = graph2[1] lam2 = graph2[2] mu2 = graph2[3] # compute r2,", "sig1 == r1): # then all is good, keep going if (rho2 ==", "else: # carry on N1 = a2*k1/S2 N2 = a1*k2/S1 if is_integer_num(N1) and", "sys import argv script, infile, outfile = argv from tabulate import tabulate import" ]
[ "feature column in the raw data set. labels {pd.Series} -- Label corresponding to", "metafeatures_to_retain) \"\"\" idx_to_normalize: List[int] = [] idx_to_retain: List[int] = [] IGNORE_COLS = (", "Numerical columns - not normalize but retain for training: Features whose title ends", "but retain for training: Features whose title ends with `mark`. Remainder metafeatures are", "series: pd.Series) -> bool: return pd.api.types.is_numeric_dtype(series) @staticmethod def _split_features_and_labels( mds: pd.DataFrame, label_col: str", "stored in each featurizer's `sec_metafeatures` and `sec_test_metafeatures` attributes. These extracted metafeatures will then", "instance. \"\"\" for featurizer in self.featurizers: if type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures,", "raw data sets. This supports the MetaDataSetFeaturizerViaLambda class functionality. \"\"\" NUM_BASE_METAFEATURES = (", "whose title ends with `mark`. Remainder metafeatures are dropped. Note: Columns are tracked", ") -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]: raise NotImplementedError def _select_metafeatures( self, df:", "None: sec_feature_names = list(self.metafeatures) + [ name for featurizer in self.featurizers for name", "\"*\" ) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Select metafeatures to normalize and to retain", "retain for training: Features whose title ends with `mark`. Remainder metafeatures are dropped.", "to avoid problems when there are duplicated columnn names. Arguments: df {pd.DataFrame} --", "Type[RobustScaler] = None self.featurizers: List = [] self._multiprocess_raw_secondary: bool = False # Multiprocessing", "of the DataSetParser subclass instance. \"\"\" for featurizer in self.featurizers: if type(featurizer).__name__ ==", "attributes: src {Path} -- Path to data set file on disk. 
metafeatures {pd.DataFrame}", "from source.\"\"\" raise NotImplementedError @abstractmethod def featurize_base(self): \"\"\"Featurize base metafeatures.\"\"\" raise NotImplementedError @abstractmethod", "-- Column containing labels in the MetaDataSet. Returns: Tuple[pd.DataFrame, pd.Series] -- (features, labels)", "\"\"\"Init function.\"\"\" self.src: Path = None self.labels: pd.Series = None self.metafeatures: pd.DataFrame =", "-- A list of featurizers that performs secondary metafeaturizations. Class attributes: NUM_BASE_METAFEATURES {int}", "pd.concat( [self.test_metafeatures, *sec_test_metafeatures], axis=1, ignore_index=True, ) self.test_metafeatures.columns = sec_feature_names class DataSetParser(ABC, FeaturizerMixin): \"\"\"", "-- A scaler to handle normalize metafeatures before serving them for training. featurizers:", "indices instead of names to avoid problems when there are duplicated columnn names.", "provide secondary featurization functionality.\"\"\" def featurize_secondary(self): \"\"\" Perform secondary featurization. Sequentially trigger each", ") -> Tuple[pd.DataFrame, pd.Series]: \"\"\" Split features and labels. Arguments: mds {pd.DataFrame} --", "them for training. featurizers: {List} -- A list of featurizers that performs secondary", "-> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]: raise NotImplementedError def _select_metafeatures( self, df: pd.DataFrame,", "Returns: Tuple[pd.DataFrame, pd.DataFrame] -- (metafeatures_to_normalize, metafeatures_to_retain) \"\"\" idx_to_normalize: List[int] = [] idx_to_retain: List[int]", "and extract metafeatures from raw data sets. FeaturizerMixin provides the `.featurize` method. Instance", "set. labels {pd.Series} -- Label corresponding to each metafeature. test_src {Path} -- Optional", "attributes: NUM_BASE_METAFEATURES {int} -- Number of base metafeatures. 
Used to separate base and", "= None self.test_metafeatures: pd.DataFrame = None self.scaler: Type[RobustScaler] = None self.featurizers: List =", "extract secondary features. The extracted secondary metafeatures are stored in each featurizer's `sec_metafeatures`", "columns that should not be normlized but retained for training. Returns: Tuple[pd.DataFrame, pd.DataFrame]", "str = \"*\" ) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Select metafeatures to normalize and", "Path to data set file on disk. metafeatures {pd.DataFrame} -- Metafeatures extracted from", "These extracted metafeatures will then be collected and appended column-wise to the `metafeature`", "normlized but retained for training. Returns: Tuple[pd.DataFrame, pd.DataFrame] -- (metafeatures_to_normalize, metafeatures_to_retain) \"\"\" idx_to_normalize:", "sample columns which may be of type int \"total_val\", # Intent prediction should", "corresponding to each test metafeature row. scaler {RobustScaler} -- A scaler to handle", "std, num_nans, num_distincts) def __init__(self): \"\"\"Init function.\"\"\" self.src: Path = None self.labels: pd.Series", "the raw data set. labels {pd.Series} -- Label corresponding to each metafeature. test_src", "self.featurizers: List = [] self._multiprocess_raw_secondary: bool = False # Multiprocessing of raw dataframe(s)", "class to load and extract metafeatures from raw data sets. FeaturizerMixin provides the", "-- Number of base metafeatures. Used to separate base and secondary metafeatures. Abstract", "is not None: sec_metafeatures = [x.sec_metafeatures for x in self.featurizers] self.metafeatures = pd.concat(", "# into appropriate groups if col[-1] == \"*\": idx_to_retain.append(i) elif self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i)", "secondary feature names if self.metafeatures is not None: sec_feature_names = list(self.metafeatures) + [", "and `sec_test_metafeatures` attributes. 
These extracted metafeatures will then be collected and appended column-wise", "import ABC, abstractmethod from pathlib import Path from typing import Callable, Generator, List,", "keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, ) else: featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self): \"\"\"Add", "represented as ngrams \"sample\", # Ignore sample columns which may be of type", "metafeature row. scaler {RobustScaler} -- A scaler to handle normalize metafeatures before serving", "raw data set. labels {pd.Series} -- Label corresponding to each metafeature. test_src {Path}", "Class attributes: NUM_BASE_METAFEATURES {int} -- Number of base metafeatures. Used to separate base", "_create_raw_generator -- Returns a generator of raw data sets. This supports the MetaDataSetFeaturizerViaLambda", "import numpy as np import pandas as pd from sklearn.preprocessing import RobustScaler class", "to extract secondary features. The extracted secondary metafeatures are stored in each featurizer's", "self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self): \"\"\"Add secondary features to the training and test metafeature attributes.\"\"\"", "class DataSetParser(ABC, FeaturizerMixin): \"\"\" Abstract base class to load and extract metafeatures from", "if self.metafeatures is not None: sec_feature_names = list(self.metafeatures) + [ name for featurizer", ") self.test_metafeatures.columns = sec_feature_names class DataSetParser(ABC, FeaturizerMixin): \"\"\" Abstract base class to load", "Featurize base metafeatures. normalize_features -- Performs normalization on the metafeatures and test metafeatures", "to load and extract metafeatures from raw data sets. FeaturizerMixin provides the `.featurize`", "names. Arguments: df {pd.DataFrame} -- Metafeatures dataframe. 
mark {str} -- Character to append", "normalize but retain for training: Features whose title ends with `mark`. Remainder metafeatures", "] elif self.test_metafeatures is not None: sec_feature_names = list(self.test_metafeatures) + [ name for", "bool: return pd.api.types.is_numeric_dtype(series) @staticmethod def _split_features_and_labels( mds: pd.DataFrame, label_col: str ) -> Tuple[pd.DataFrame,", "metafeatures before serving them for training. featurizers: {List} -- A list of featurizers", "metafeatures. normalize_features -- Performs normalization on the metafeatures and test metafeatures (if provided).", "# Get secondary feature names if self.metafeatures is not None: sec_feature_names = list(self.metafeatures)", "-> Tuple[pd.DataFrame, pd.Series]: \"\"\" Split features and labels. Arguments: mds {pd.DataFrame} -- MetaDataSet.", "attributes. These extracted metafeatures will then be collected and appended column-wise to the", "of raw dataframe(s) @abstractmethod def load_data_set(self): \"\"\"Load data set from source.\"\"\" raise NotImplementedError", "from typing import Callable, Generator, List, Tuple, Type import numpy as np import", "pd.Series = None self.test_metafeatures: pd.DataFrame = None self.scaler: Type[RobustScaler] = None self.featurizers: List", "-- Path to data set file on disk. metafeatures {pd.DataFrame} -- Metafeatures extracted", "used. Metafeatures to: - normalize: Numerical columns - not normalize but retain for", "pd.DataFrame = None self.test_src: Path = None self.test_labels: pd.Series = None self.test_metafeatures: pd.DataFrame", "Optional path to test raw data set file on disk. This attribute applies", "featurizer in self.featurizers: if type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, )", "attribute applies more to the subclasses of MetaDataSetParser. 
test_metafeatures {pd.DataFrame} -- Optional metafeatures", "@abstractmethod def _create_raw_generator( self ) -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]: raise NotImplementedError", "from the test raw data set. test_labels {pd.Series} -- Optional labels corresponding to", "[self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True, ) self.metafeatures.columns = sec_feature_names if self.test_metafeatures is not None:", "self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, ) else: featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self):", "row. scaler {RobustScaler} -- A scaler to handle normalize metafeatures before serving them", "subclasses of MetaDataSetParser. test_metafeatures {pd.DataFrame} -- Optional metafeatures extracted from the test raw", "data set. test_labels {pd.Series} -- Optional labels corresponding to each test metafeature row.", "sets. This supports the MetaDataSetFeaturizerViaLambda class functionality. \"\"\" NUM_BASE_METAFEATURES = ( 7 )", "file on disk. 
metafeatures {pd.DataFrame} -- Metafeatures extracted from the raw data set.", "by indices instead of names to avoid problems when there are duplicated columnn", "\"attribute_name\", # Already represented as ngrams \"sample\", # Ignore sample columns which may", "features_to_normalize = df.iloc[:, idx_to_normalize] features_to_retain = df.iloc[:, idx_to_retain] return features_to_normalize, features_to_retain def _is_numeric(self,", "is not None: sec_feature_names = list(self.metafeatures) + [ name for featurizer in self.featurizers", "for x in self.featurizers] self.metafeatures = pd.concat( [self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True, ) self.metafeatures.columns", "idx_to_retain: List[int] = [] IGNORE_COLS = ( \"attribute_name\", # Already represented as ngrams", "self.test_labels: pd.Series = None self.test_metafeatures: pd.DataFrame = None self.scaler: Type[RobustScaler] = None self.featurizers:", "for featurizer in self.featurizers for name in featurizer.sec_feature_names ] elif self.test_metafeatures is not", "\"\"\" Abstract base class to load and extract metafeatures from raw data sets.", "on disk. metafeatures {pd.DataFrame} -- Metafeatures extracted from the raw data set. Each", "and to retain for training. The following criteria is used. Metafeatures to: -", "for x in self.featurizers ] self.test_metafeatures = pd.concat( [self.test_metafeatures, *sec_test_metafeatures], axis=1, ignore_index=True, )", "secondary featurization functionality.\"\"\" def featurize_secondary(self): \"\"\" Perform secondary featurization. Sequentially trigger each featurizer", "each featurizer to extract secondary features. The extracted secondary metafeatures are stored in", "labels corresponding to each test metafeature row. 
scaler {RobustScaler} -- A scaler to", "\"\"\"Featurize base metafeatures.\"\"\" raise NotImplementedError @abstractmethod def normalize_features(self): \"\"\"Normalize metafeatures for training.\"\"\" raise", "List = [] self._multiprocess_raw_secondary: bool = False # Multiprocessing of raw dataframe(s) @abstractmethod", "subclass instance. \"\"\" for featurizer in self.featurizers: if type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(),", "to handle normalize metafeatures before serving them for training. featurizers: {List} -- A", "to data set file on disk. metafeatures {pd.DataFrame} -- Metafeatures extracted from the", "source.\"\"\" raise NotImplementedError @abstractmethod def featurize_base(self): \"\"\"Featurize base metafeatures.\"\"\" raise NotImplementedError @abstractmethod def", "elif self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i) features_to_normalize = df.iloc[:, idx_to_normalize] features_to_retain = df.iloc[:, idx_to_retain] return", "sets. FeaturizerMixin provides the `.featurize` method. Instance attributes: src {Path} -- Path to", "`sec_metafeatures` and `sec_test_metafeatures` attributes. These extracted metafeatures will then be collected and appended", "each featurizer's `sec_metafeatures` and `sec_test_metafeatures` attributes. These extracted metafeatures will then be collected", "metafeatures from raw data sets. FeaturizerMixin provides the `.featurize` method. Instance attributes: src", "import RobustScaler class FeaturizerMixin: \"\"\"Mixin to provide secondary featurization functionality.\"\"\" def featurize_secondary(self): \"\"\"", "not None: sec_metafeatures = [x.sec_metafeatures for x in self.featurizers] self.metafeatures = pd.concat( [self.metafeatures,", "data sets. FeaturizerMixin provides the `.featurize` method. 
Instance attributes: src {Path} -- Path", "axis=1, ignore_index=True, ) self.test_metafeatures.columns = sec_feature_names class DataSetParser(ABC, FeaturizerMixin): \"\"\" Abstract base class", "method. Instance attributes: src {Path} -- Path to data set file on disk.", "to append to names of columns that should not be normlized but retained", "provides the `.featurize` method. Instance attributes: src {Path} -- Path to data set", "are duplicated columnn names. Arguments: df {pd.DataFrame} -- Metafeatures dataframe. mark {str} --", "to retain for training. The following criteria is used. Metafeatures to: - normalize:", "{pd.Series} -- Optional labels corresponding to each test metafeature row. scaler {RobustScaler} --", "self.metafeatures.columns = sec_feature_names if self.test_metafeatures is not None: sec_test_metafeatures = [ x.sec_test_metafeatures for", "raw data set. test_labels {pd.Series} -- Optional labels corresponding to each test metafeature", "prediction should not be based on # data points \"num_distincts\", # Use `normalized_distinct_rate`", "return pd.api.types.is_numeric_dtype(series) @staticmethod def _split_features_and_labels( mds: pd.DataFrame, label_col: str ) -> Tuple[pd.DataFrame, pd.Series]:", "metafeatures. Abstract methods: load_data_set -- Load the data set and perform necessarily cleaning", "self.test_src: Path = None self.test_labels: pd.Series = None self.test_metafeatures: pd.DataFrame = None self.scaler:", "extracted from the test raw data set. test_labels {pd.Series} -- Optional labels corresponding", "None self.featurizers: List = [] self._multiprocess_raw_secondary: bool = False # Multiprocessing of raw", "for training. 
featurizers: {List} -- A list of featurizers that performs secondary metafeaturizations.", "idx_to_retain] return features_to_normalize, features_to_retain def _is_numeric(self, series: pd.Series) -> bool: return pd.api.types.is_numeric_dtype(series) @staticmethod", "_is_numeric(self, series: pd.Series) -> bool: return pd.api.types.is_numeric_dtype(series) @staticmethod def _split_features_and_labels( mds: pd.DataFrame, label_col:", "List[int] = [] IGNORE_COLS = ( \"attribute_name\", # Already represented as ngrams \"sample\",", "Performs normalization on the metafeatures and test metafeatures (if provided). _create_raw_generator -- Returns", "not be normlized but retained for training. Returns: Tuple[pd.DataFrame, pd.DataFrame] -- (metafeatures_to_normalize, metafeatures_to_retain)", "bool = False # Multiprocessing of raw dataframe(s) @abstractmethod def load_data_set(self): \"\"\"Load data", "the data set and perform necessarily cleaning and parsing. featurize_base -- Featurize base", "should not be based on # data points \"num_distincts\", # Use `normalized_distinct_rate` instead", "sklearn.preprocessing import RobustScaler class FeaturizerMixin: \"\"\"Mixin to provide secondary featurization functionality.\"\"\" def featurize_secondary(self):", "instead \"num_nans\", # Captured in `nan_rate` ) for i, col in enumerate(df.columns): if", ") # Includes (total_val, min, max, mean, std, num_nans, num_distincts) def __init__(self): \"\"\"Init", "data points \"num_distincts\", # Use `normalized_distinct_rate` instead \"num_nans\", # Captured in `nan_rate` )", "- not normalize but retain for training: Features whose title ends with `mark`.", "metafeatures (if provided). _create_raw_generator -- Returns a generator of raw data sets. 
This", "self.src: Path = None self.labels: pd.Series = None self.metafeatures: pd.DataFrame = None self.test_src:", "Columns are tracked by indices instead of names to avoid problems when there", "= [ x.sec_test_metafeatures for x in self.featurizers ] self.test_metafeatures = pd.concat( [self.test_metafeatures, *sec_test_metafeatures],", "columns which may be of type int \"total_val\", # Intent prediction should not", "labels in the MetaDataSet. Returns: Tuple[pd.DataFrame, pd.Series] -- (features, labels) tuple. \"\"\" return", "Optional metafeatures extracted from the test raw data set. test_labels {pd.Series} -- Optional", "to the training and test metafeature attributes.\"\"\" # Get secondary feature names if", "in self.featurizers for name in featurizer.sec_feature_names ] if self.metafeatures is not None: sec_metafeatures", "# Captured in `nan_rate` ) for i, col in enumerate(df.columns): if col in", "( 7 ) # Includes (total_val, min, max, mean, std, num_nans, num_distincts) def", ") self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self): \"\"\"Add secondary features to the training and test metafeature", "pd.DataFrame, mark: str = \"*\" ) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Select metafeatures to", "pandas as pd from sklearn.preprocessing import RobustScaler class FeaturizerMixin: \"\"\"Mixin to provide secondary", "marked # into appropriate groups if col[-1] == \"*\": idx_to_retain.append(i) elif self._is_numeric(df.iloc[:, i]):", "(if provided). _create_raw_generator -- Returns a generator of raw data sets. This supports", "title ends with `mark`. Remainder metafeatures are dropped. Note: Columns are tracked by", "of MetaDataSetParser. 
test_metafeatures {pd.DataFrame} -- Optional metafeatures extracted from the test raw data", "if col in IGNORE_COLS: continue # Save columns that are either numeric or", "= sec_feature_names class DataSetParser(ABC, FeaturizerMixin): \"\"\" Abstract base class to load and extract", "if col[-1] == \"*\": idx_to_retain.append(i) elif self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i) features_to_normalize = df.iloc[:, idx_to_normalize]", "not normalize but retain for training: Features whose title ends with `mark`. Remainder", "in featurizer.sec_feature_names ] if self.metafeatures is not None: sec_metafeatures = [x.sec_metafeatures for x", "featurizer to extract secondary features. The extracted secondary metafeatures are stored in each", "[] self._multiprocess_raw_secondary: bool = False # Multiprocessing of raw dataframe(s) @abstractmethod def load_data_set(self):", "Path from typing import Callable, Generator, List, Tuple, Type import numpy as np", "retained for training. Returns: Tuple[pd.DataFrame, pd.DataFrame] -- (metafeatures_to_normalize, metafeatures_to_retain) \"\"\" idx_to_normalize: List[int] =", "each test metafeature row. 
scaler {RobustScaler} -- A scaler to handle normalize metafeatures", "type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, ) else: featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures,", "Type import numpy as np import pandas as pd from sklearn.preprocessing import RobustScaler", "FeaturizerMixin.\"\"\" from abc import ABC, abstractmethod from pathlib import Path from typing import", "raise NotImplementedError @abstractmethod def featurize_base(self): \"\"\"Featurize base metafeatures.\"\"\" raise NotImplementedError @abstractmethod def normalize_features(self):", "`nan_rate` ) for i, col in enumerate(df.columns): if col in IGNORE_COLS: continue #", "FeaturizerMixin: \"\"\"Mixin to provide secondary featurization functionality.\"\"\" def featurize_secondary(self): \"\"\" Perform secondary featurization.", "*sec_metafeatures], axis=1, ignore_index=True, ) self.metafeatures.columns = sec_feature_names if self.test_metafeatures is not None: sec_test_metafeatures", "metafeaturizations. Class attributes: NUM_BASE_METAFEATURES {int} -- Number of base metafeatures. Used to separate", "Column containing labels in the MetaDataSet. Returns: Tuple[pd.DataFrame, pd.Series] -- (features, labels) tuple.", "-- Metafeatures extracted from the raw data set. Each metafeature row corresponds to", "metafeatures and test metafeatures (if provided). _create_raw_generator -- Returns a generator of raw", "\"\"\" for featurizer in self.featurizers: if type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures,", "on disk. This attribute applies more to the subclasses of MetaDataSetParser. 
test_metafeatures {pd.DataFrame}", "dataframe(s) @abstractmethod def load_data_set(self): \"\"\"Load data set from source.\"\"\" raise NotImplementedError @abstractmethod def", "features and labels. Arguments: mds {pd.DataFrame} -- MetaDataSet. label_col {str} -- Column containing", "load_data_set -- Load the data set and perform necessarily cleaning and parsing. featurize_base", "= list(self.test_metafeatures) + [ name for featurizer in self.featurizers for name in featurizer.sec_feature_names", "{pd.DataFrame} -- Metafeatures dataframe. mark {str} -- Character to append to names of", "elif self.test_metafeatures is not None: sec_feature_names = list(self.test_metafeatures) + [ name for featurizer", "\"\"\" NUM_BASE_METAFEATURES = ( 7 ) # Includes (total_val, min, max, mean, std,", "col in enumerate(df.columns): if col in IGNORE_COLS: continue # Save columns that are", "= [] IGNORE_COLS = ( \"attribute_name\", # Already represented as ngrams \"sample\", #", "raw data set file on disk. This attribute applies more to the subclasses", "NUM_BASE_METAFEATURES = ( 7 ) # Includes (total_val, min, max, mean, std, num_nans,", "idx_to_retain.append(i) elif self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i) features_to_normalize = df.iloc[:, idx_to_normalize] features_to_retain = df.iloc[:, idx_to_retain]", "= \"*\" ) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Select metafeatures to normalize and to", "problems when there are duplicated columnn names. Arguments: df {pd.DataFrame} -- Metafeatures dataframe.", "df: pd.DataFrame, mark: str = \"*\" ) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Select metafeatures", "tracked by indices instead of names to avoid problems when there are duplicated", "disk. This attribute applies more to the subclasses of MetaDataSetParser. 
test_metafeatures {pd.DataFrame} --", "raise NotImplementedError @abstractmethod def _create_raw_generator( self ) -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]:", "\"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, ) else: featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures()", "appropriate groups if col[-1] == \"*\": idx_to_retain.append(i) elif self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i) features_to_normalize =", "in `nan_rate` ) for i, col in enumerate(df.columns): if col in IGNORE_COLS: continue", "DataSetParser ABC and FeaturizerMixin.\"\"\" from abc import ABC, abstractmethod from pathlib import Path", "import pandas as pd from sklearn.preprocessing import RobustScaler class FeaturizerMixin: \"\"\"Mixin to provide", "pd.Series]: \"\"\" Split features and labels. Arguments: mds {pd.DataFrame} -- MetaDataSet. 
label_col {str}", "is not None: sec_feature_names = list(self.test_metafeatures) + [ name for featurizer in self.featurizers", "= pd.concat( [self.test_metafeatures, *sec_test_metafeatures], axis=1, ignore_index=True, ) self.test_metafeatures.columns = sec_feature_names class DataSetParser(ABC, FeaturizerMixin):", "list(self.test_metafeatures) + [ name for featurizer in self.featurizers for name in featurizer.sec_feature_names ]", "None self.test_src: Path = None self.test_labels: pd.Series = None self.test_metafeatures: pd.DataFrame = None", "(total_val, min, max, mean, std, num_nans, num_distincts) def __init__(self): \"\"\"Init function.\"\"\" self.src: Path", "in featurizer.sec_feature_names ] elif self.test_metafeatures is not None: sec_feature_names = list(self.test_metafeatures) + [", "data set from source.\"\"\" raise NotImplementedError @abstractmethod def featurize_base(self): \"\"\"Featurize base metafeatures.\"\"\" raise", "dropped. Note: Columns are tracked by indices instead of names to avoid problems", "pd.DataFrame, label_col: str ) -> Tuple[pd.DataFrame, pd.Series]: \"\"\" Split features and labels. Arguments:", "{str} -- Character to append to names of columns that should not be", "of type int \"total_val\", # Intent prediction should not be based on #", "secondary features. The extracted secondary metafeatures are stored in each featurizer's `sec_metafeatures` and", "DataSetParser subclass instance. \"\"\" for featurizer in self.featurizers: if type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\": featurizer.featurize(", "metafeature. test_src {Path} -- Optional path to test raw data set file on", "applies more to the subclasses of MetaDataSetParser. test_metafeatures {pd.DataFrame} -- Optional metafeatures extracted", "Ignore sample columns which may be of type int \"total_val\", # Intent prediction", "corresponds to a feature column in the raw data set. labels {pd.Series} --", "`mark`. Remainder metafeatures are dropped. 
Note: Columns are tracked by indices instead of", "featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, ) else: featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures() def", "not None: sec_feature_names = list(self.test_metafeatures) + [ name for featurizer in self.featurizers for", "= None self.test_src: Path = None self.test_labels: pd.Series = None self.test_metafeatures: pd.DataFrame =", "self.featurizers: if type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, ) else: featurizer.featurize(", "based on # data points \"num_distincts\", # Use `normalized_distinct_rate` instead \"num_nans\", # Captured", "and test metafeature attributes.\"\"\" # Get secondary feature names if self.metafeatures is not", "False # Multiprocessing of raw dataframe(s) @abstractmethod def load_data_set(self): \"\"\"Load data set from", "The extracted secondary metafeatures are stored in each featurizer's `sec_metafeatures` and `sec_test_metafeatures` attributes.", "numeric or that have been marked # into appropriate groups if col[-1] ==", "of raw data sets. This supports the MetaDataSetFeaturizerViaLambda class functionality. \"\"\" NUM_BASE_METAFEATURES =", "Remainder metafeatures are dropped. 
Note: Columns are tracked by indices instead of names", "self.labels: pd.Series = None self.metafeatures: pd.DataFrame = None self.test_src: Path = None self.test_labels:", "min, max, mean, std, num_nans, num_distincts) def __init__(self): \"\"\"Init function.\"\"\" self.src: Path =", "= df.iloc[:, idx_to_retain] return features_to_normalize, features_to_retain def _is_numeric(self, series: pd.Series) -> bool: return", "NUM_BASE_METAFEATURES {int} -- Number of base metafeatures. Used to separate base and secondary", "name in featurizer.sec_feature_names ] elif self.test_metafeatures is not None: sec_feature_names = list(self.test_metafeatures) +", "extracted secondary metafeatures are stored in each featurizer's `sec_metafeatures` and `sec_test_metafeatures` attributes. These", "to normalize and to retain for training. The following criteria is used. Metafeatures", "that have been marked # into appropriate groups if col[-1] == \"*\": idx_to_retain.append(i)", "in the raw data set. labels {pd.Series} -- Label corresponding to each metafeature.", "base metafeatures. normalize_features -- Performs normalization on the metafeatures and test metafeatures (if", "and perform necessarily cleaning and parsing. featurize_base -- Featurize base metafeatures. normalize_features --", "provided). _create_raw_generator -- Returns a generator of raw data sets. This supports the", "7 ) # Includes (total_val, min, max, mean, std, num_nans, num_distincts) def __init__(self):", "List, Tuple, Type import numpy as np import pandas as pd from sklearn.preprocessing", "Arguments: df {pd.DataFrame} -- Metafeatures dataframe. 
mark {str} -- Character to append to", "List[int] = [] idx_to_retain: List[int] = [] IGNORE_COLS = ( \"attribute_name\", # Already", "Intent prediction should not be based on # data points \"num_distincts\", # Use", "are tracked by indices instead of names to avoid problems when there are", "i, col in enumerate(df.columns): if col in IGNORE_COLS: continue # Save columns that", "NotImplementedError @abstractmethod def _create_raw_generator( self ) -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]: raise", "in the MetaDataSet. Returns: Tuple[pd.DataFrame, pd.Series] -- (features, labels) tuple. \"\"\" return mds.drop(label_col,", "functionality.\"\"\" def featurize_secondary(self): \"\"\" Perform secondary featurization. Sequentially trigger each featurizer to extract", "label_col {str} -- Column containing labels in the MetaDataSet. Returns: Tuple[pd.DataFrame, pd.Series] --", "definition for the DataSetParser ABC and FeaturizerMixin.\"\"\" from abc import ABC, abstractmethod from", "be based on # data points \"num_distincts\", # Use `normalized_distinct_rate` instead \"num_nans\", #", "None self.labels: pd.Series = None self.metafeatures: pd.DataFrame = None self.test_src: Path = None", "the subclasses of MetaDataSetParser. test_metafeatures {pd.DataFrame} -- Optional metafeatures extracted from the test", "and FeaturizerMixin.\"\"\" from abc import ABC, abstractmethod from pathlib import Path from typing", "# Includes (total_val, min, max, mean, std, num_nans, num_distincts) def __init__(self): \"\"\"Init function.\"\"\"", "(metafeatures_to_normalize, metafeatures_to_retain) \"\"\" idx_to_normalize: List[int] = [] idx_to_retain: List[int] = [] IGNORE_COLS =", "sec_metafeatures = [x.sec_metafeatures for x in self.featurizers] self.metafeatures = pd.concat( [self.metafeatures, *sec_metafeatures], axis=1,", "performs secondary metafeaturizations. Class attributes: NUM_BASE_METAFEATURES {int} -- Number of base metafeatures. 
Used", "the metafeatures and test metafeatures (if provided). _create_raw_generator -- Returns a generator of", "self.metafeatures is not None: sec_metafeatures = [x.sec_metafeatures for x in self.featurizers] self.metafeatures =", "for training. Returns: Tuple[pd.DataFrame, pd.DataFrame] -- (metafeatures_to_normalize, metafeatures_to_retain) \"\"\" idx_to_normalize: List[int] = []", "@abstractmethod def featurize_base(self): \"\"\"Featurize base metafeatures.\"\"\" raise NotImplementedError @abstractmethod def normalize_features(self): \"\"\"Normalize metafeatures", "== \"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, ) else: featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, )", "base class to load and extract metafeatures from raw data sets. FeaturizerMixin provides", "of featurizers that performs secondary metafeaturizations. Class attributes: NUM_BASE_METAFEATURES {int} -- Number of", "continue # Save columns that are either numeric or that have been marked", "def _select_metafeatures( self, df: pd.DataFrame, mark: str = \"*\" ) -> Tuple[pd.DataFrame, pd.DataFrame]:", "collected and appended column-wise to the `metafeature` and `test_metafeature` attributes of the DataSetParser", "that performs secondary metafeaturizations. Class attributes: NUM_BASE_METAFEATURES {int} -- Number of base metafeatures.", "\"\"\"Normalize metafeatures for training.\"\"\" raise NotImplementedError @abstractmethod def _create_raw_generator( self ) -> Generator[Tuple[str,", "test metafeature attributes.\"\"\" # Get secondary feature names if self.metafeatures is not None:", "scaler to handle normalize metafeatures before serving them for training. 
featurizers: {List} --", "num_nans, num_distincts) def __init__(self): \"\"\"Init function.\"\"\" self.src: Path = None self.labels: pd.Series =", "\"\"\" Select metafeatures to normalize and to retain for training. The following criteria", "test_metafeatures {pd.DataFrame} -- Optional metafeatures extracted from the test raw data set. test_labels", "columnn names. Arguments: df {pd.DataFrame} -- Metafeatures dataframe. mark {str} -- Character to", "instead of names to avoid problems when there are duplicated columnn names. Arguments:", "Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]: raise NotImplementedError def _select_metafeatures( self, df: pd.DataFrame, mark:", "Generator, List, Tuple, Type import numpy as np import pandas as pd from", "[ name for featurizer in self.featurizers for name in featurizer.sec_feature_names ] if self.metafeatures", "been marked # into appropriate groups if col[-1] == \"*\": idx_to_retain.append(i) elif self._is_numeric(df.iloc[:,", "not None: sec_feature_names = list(self.metafeatures) + [ name for featurizer in self.featurizers for", "list(self.metafeatures) + [ name for featurizer in self.featurizers for name in featurizer.sec_feature_names ]", "set from source.\"\"\" raise NotImplementedError @abstractmethod def featurize_base(self): \"\"\"Featurize base metafeatures.\"\"\" raise NotImplementedError", "and labels. Arguments: mds {pd.DataFrame} -- MetaDataSet. label_col {str} -- Column containing labels", "avoid problems when there are duplicated columnn names. Arguments: df {pd.DataFrame} -- Metafeatures", "-- Character to append to names of columns that should not be normlized", "load_data_set(self): \"\"\"Load data set from source.\"\"\" raise NotImplementedError @abstractmethod def featurize_base(self): \"\"\"Featurize base", "raw data set. Each metafeature row corresponds to a feature column in the", "{pd.Series} -- Label corresponding to each metafeature. 
test_src {Path} -- Optional path to", "path to test raw data set file on disk. This attribute applies more", "typing import Callable, Generator, List, Tuple, Type import numpy as np import pandas", "def __add_secondary_metafeatures(self): \"\"\"Add secondary features to the training and test metafeature attributes.\"\"\" #", "base metafeatures.\"\"\" raise NotImplementedError @abstractmethod def normalize_features(self): \"\"\"Normalize metafeatures for training.\"\"\" raise NotImplementedError", "metafeature row corresponds to a feature column in the raw data set. labels", "None]: raise NotImplementedError def _select_metafeatures( self, df: pd.DataFrame, mark: str = \"*\" )", "[] idx_to_retain: List[int] = [] IGNORE_COLS = ( \"attribute_name\", # Already represented as", "data set. Each metafeature row corresponds to a feature column in the raw", "This attribute applies more to the subclasses of MetaDataSetParser. test_metafeatures {pd.DataFrame} -- Optional", "-- MetaDataSet. label_col {str} -- Column containing labels in the MetaDataSet. Returns: Tuple[pd.DataFrame,", "more to the subclasses of MetaDataSetParser. test_metafeatures {pd.DataFrame} -- Optional metafeatures extracted from", "or that have been marked # into appropriate groups if col[-1] == \"*\":", "featurizer in self.featurizers for name in featurizer.sec_feature_names ] elif self.test_metafeatures is not None:", "normalize_features -- Performs normalization on the metafeatures and test metafeatures (if provided). _create_raw_generator", "Metafeatures extracted from the raw data set. 
Each metafeature row corresponds to a", "test_src {Path} -- Optional path to test raw data set file on disk.", "idx_to_normalize: List[int] = [] idx_to_retain: List[int] = [] IGNORE_COLS = ( \"attribute_name\", #", "__init__(self): \"\"\"Init function.\"\"\" self.src: Path = None self.labels: pd.Series = None self.metafeatures: pd.DataFrame", "columns that are either numeric or that have been marked # into appropriate", "`sec_test_metafeatures` attributes. These extracted metafeatures will then be collected and appended column-wise to", "metafeature attributes.\"\"\" # Get secondary feature names if self.metafeatures is not None: sec_feature_names", ") else: featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self): \"\"\"Add secondary features to", "[ x.sec_test_metafeatures for x in self.featurizers ] self.test_metafeatures = pd.concat( [self.test_metafeatures, *sec_test_metafeatures], axis=1,", "name in featurizer.sec_feature_names ] if self.metafeatures is not None: sec_metafeatures = [x.sec_metafeatures for", "`.featurize` method. Instance attributes: src {Path} -- Path to data set file on", "of columns that should not be normlized but retained for training. Returns: Tuple[pd.DataFrame,", "[] IGNORE_COLS = ( \"attribute_name\", # Already represented as ngrams \"sample\", # Ignore", "numpy as np import pandas as pd from sklearn.preprocessing import RobustScaler class FeaturizerMixin:", "from abc import ABC, abstractmethod from pathlib import Path from typing import Callable,", "mds: pd.DataFrame, label_col: str ) -> Tuple[pd.DataFrame, pd.Series]: \"\"\" Split features and labels.", "= ( 7 ) # Includes (total_val, min, max, mean, std, num_nans, num_distincts)", "return features_to_normalize, features_to_retain def _is_numeric(self, series: pd.Series) -> bool: return pd.api.types.is_numeric_dtype(series) @staticmethod def", "to each test metafeature row. 
scaler {RobustScaler} -- A scaler to handle normalize", "to names of columns that should not be normlized but retained for training.", "mark {str} -- Character to append to names of columns that should not", "file on disk. This attribute applies more to the subclasses of MetaDataSetParser. test_metafeatures", "column in the raw data set. labels {pd.Series} -- Label corresponding to each", "] if self.metafeatures is not None: sec_metafeatures = [x.sec_metafeatures for x in self.featurizers]", "df.iloc[:, idx_to_retain] return features_to_normalize, features_to_retain def _is_numeric(self, series: pd.Series) -> bool: return pd.api.types.is_numeric_dtype(series)", "featurizer's `sec_metafeatures` and `sec_test_metafeatures` attributes. These extracted metafeatures will then be collected and", "Includes (total_val, min, max, mean, std, num_nans, num_distincts) def __init__(self): \"\"\"Init function.\"\"\" self.src:", "\"total_val\", # Intent prediction should not be based on # data points \"num_distincts\",", "pd.Series) -> bool: return pd.api.types.is_numeric_dtype(series) @staticmethod def _split_features_and_labels( mds: pd.DataFrame, label_col: str )", "Instance attributes: src {Path} -- Path to data set file on disk. metafeatures", "+ [ name for featurizer in self.featurizers for name in featurizer.sec_feature_names ] elif", "trigger each featurizer to extract secondary features. The extracted secondary metafeatures are stored", "test metafeatures (if provided). _create_raw_generator -- Returns a generator of raw data sets.", "append to names of columns that should not be normlized but retained for", "metafeatures.\"\"\" raise NotImplementedError @abstractmethod def normalize_features(self): \"\"\"Normalize metafeatures for training.\"\"\" raise NotImplementedError @abstractmethod", "parsing. featurize_base -- Featurize base metafeatures. normalize_features -- Performs normalization on the metafeatures", "the raw data set. 
Each metafeature row corresponds to a feature column in", "MetaDataSetFeaturizerViaLambda class functionality. \"\"\" NUM_BASE_METAFEATURES = ( 7 ) # Includes (total_val, min,", "{Path} -- Path to data set file on disk. metafeatures {pd.DataFrame} -- Metafeatures", "= None self.metafeatures: pd.DataFrame = None self.test_src: Path = None self.test_labels: pd.Series =", "are either numeric or that have been marked # into appropriate groups if", "Character to append to names of columns that should not be normlized but", "the MetaDataSet. Returns: Tuple[pd.DataFrame, pd.Series] -- (features, labels) tuple. \"\"\" return mds.drop(label_col, axis=1),", "a generator of raw data sets. This supports the MetaDataSetFeaturizerViaLambda class functionality. \"\"\"", "handle normalize metafeatures before serving them for training. featurizers: {List} -- A list", "class functionality. \"\"\" NUM_BASE_METAFEATURES = ( 7 ) # Includes (total_val, min, max,", "self.metafeatures = pd.concat( [self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True, ) self.metafeatures.columns = sec_feature_names if self.test_metafeatures", "-- Optional metafeatures extracted from the test raw data set. test_labels {pd.Series} --", "cleaning and parsing. featurize_base -- Featurize base metafeatures. normalize_features -- Performs normalization on", "from raw data sets. FeaturizerMixin provides the `.featurize` method. Instance attributes: src {Path}", "each metafeature. test_src {Path} -- Optional path to test raw data set file", "as ngrams \"sample\", # Ignore sample columns which may be of type int", "= [x.sec_metafeatures for x in self.featurizers] self.metafeatures = pd.concat( [self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True,", "metafeatures {pd.DataFrame} -- Metafeatures extracted from the raw data set. 
Each metafeature row", "in self.featurizers for name in featurizer.sec_feature_names ] elif self.test_metafeatures is not None: sec_feature_names", "self.test_metafeatures: pd.DataFrame = None self.scaler: Type[RobustScaler] = None self.featurizers: List = [] self._multiprocess_raw_secondary:", "self ) -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]: raise NotImplementedError def _select_metafeatures( self,", "which may be of type int \"total_val\", # Intent prediction should not be", "pd.DataFrame = None self.scaler: Type[RobustScaler] = None self.featurizers: List = [] self._multiprocess_raw_secondary: bool", "self.test_metafeatures is not None: sec_test_metafeatures = [ x.sec_test_metafeatures for x in self.featurizers ]", "the DataSetParser subclass instance. \"\"\" for featurizer in self.featurizers: if type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\":", "then be collected and appended column-wise to the `metafeature` and `test_metafeature` attributes of", "def featurize_base(self): \"\"\"Featurize base metafeatures.\"\"\" raise NotImplementedError @abstractmethod def normalize_features(self): \"\"\"Normalize metafeatures for", "not be based on # data points \"num_distincts\", # Use `normalized_distinct_rate` instead \"num_nans\",", "{int} -- Number of base metafeatures. Used to separate base and secondary metafeatures.", "may be of type int \"total_val\", # Intent prediction should not be based", "-> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Select metafeatures to normalize and to retain for training.", "Perform secondary featurization. Sequentially trigger each featurizer to extract secondary features. The extracted", "( \"attribute_name\", # Already represented as ngrams \"sample\", # Ignore sample columns which", "pathlib import Path from typing import Callable, Generator, List, Tuple, Type import numpy", "perform necessarily cleaning and parsing. featurize_base -- Featurize base metafeatures. 
normalize_features -- Performs", "Each metafeature row corresponds to a feature column in the raw data set.", "if self.test_metafeatures is not None: sec_test_metafeatures = [ x.sec_test_metafeatures for x in self.featurizers", "is used. Metafeatures to: - normalize: Numerical columns - not normalize but retain", "featurizer in self.featurizers for name in featurizer.sec_feature_names ] if self.metafeatures is not None:", "name for featurizer in self.featurizers for name in featurizer.sec_feature_names ] elif self.test_metafeatures is", "-- Label corresponding to each metafeature. test_src {Path} -- Optional path to test", "set and perform necessarily cleaning and parsing. featurize_base -- Featurize base metafeatures. normalize_features", "_split_features_and_labels( mds: pd.DataFrame, label_col: str ) -> Tuple[pd.DataFrame, pd.Series]: \"\"\" Split features and", "-- Metafeatures dataframe. mark {str} -- Character to append to names of columns", "-- Featurize base metafeatures. normalize_features -- Performs normalization on the metafeatures and test", "_create_raw_generator( self ) -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]: raise NotImplementedError def _select_metafeatures(", "self.featurizers ] self.test_metafeatures = pd.concat( [self.test_metafeatures, *sec_test_metafeatures], axis=1, ignore_index=True, ) self.test_metafeatures.columns = sec_feature_names", "Optional labels corresponding to each test metafeature row. scaler {RobustScaler} -- A scaler", "= pd.concat( [self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True, ) self.metafeatures.columns = sec_feature_names if self.test_metafeatures is", "-- Performs normalization on the metafeatures and test metafeatures (if provided). 
_create_raw_generator --", "Already represented as ngrams \"sample\", # Ignore sample columns which may be of", "featurizer.sec_feature_names ] if self.metafeatures is not None: sec_metafeatures = [x.sec_metafeatures for x in", "name for featurizer in self.featurizers for name in featurizer.sec_feature_names ] if self.metafeatures is", "a feature column in the raw data set. labels {pd.Series} -- Label corresponding", "the test raw data set. test_labels {pd.Series} -- Optional labels corresponding to each", "sec_test_metafeatures = [ x.sec_test_metafeatures for x in self.featurizers ] self.test_metafeatures = pd.concat( [self.test_metafeatures,", "max, mean, std, num_nans, num_distincts) def __init__(self): \"\"\"Init function.\"\"\" self.src: Path = None", "def _split_features_and_labels( mds: pd.DataFrame, label_col: str ) -> Tuple[pd.DataFrame, pd.Series]: \"\"\" Split features", "FeaturizerMixin): \"\"\" Abstract base class to load and extract metafeatures from raw data", "and appended column-wise to the `metafeature` and `test_metafeature` attributes of the DataSetParser subclass", "Select metafeatures to normalize and to retain for training. The following criteria is", "with `mark`. Remainder metafeatures are dropped. Note: Columns are tracked by indices instead", "None self.test_metafeatures: pd.DataFrame = None self.scaler: Type[RobustScaler] = None self.featurizers: List = []", "in IGNORE_COLS: continue # Save columns that are either numeric or that have", "for name in featurizer.sec_feature_names ] elif self.test_metafeatures is not None: sec_feature_names = list(self.test_metafeatures)", ") -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Select metafeatures to normalize and to retain for", "for training: Features whose title ends with `mark`. Remainder metafeatures are dropped. Note:", "ngrams \"sample\", # Ignore sample columns which may be of type int \"total_val\",", "\"\"\" Perform secondary featurization. 
Sequentially trigger each featurizer to extract secondary features. The", "_select_metafeatures( self, df: pd.DataFrame, mark: str = \"*\" ) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\"", "sec_feature_names class DataSetParser(ABC, FeaturizerMixin): \"\"\" Abstract base class to load and extract metafeatures", "of names to avoid problems when there are duplicated columnn names. Arguments: df", "This supports the MetaDataSetFeaturizerViaLambda class functionality. \"\"\" NUM_BASE_METAFEATURES = ( 7 ) #", "appended column-wise to the `metafeature` and `test_metafeature` attributes of the DataSetParser subclass instance.", "be normlized but retained for training. Returns: Tuple[pd.DataFrame, pd.DataFrame] -- (metafeatures_to_normalize, metafeatures_to_retain) \"\"\"", "num_distincts) def __init__(self): \"\"\"Init function.\"\"\" self.src: Path = None self.labels: pd.Series = None", "normalize: Numerical columns - not normalize but retain for training: Features whose title", "base metafeatures. Used to separate base and secondary metafeatures. Abstract methods: load_data_set --", "before serving them for training. featurizers: {List} -- A list of featurizers that", "to each metafeature. test_src {Path} -- Optional path to test raw data set", "{str} -- Column containing labels in the MetaDataSet. Returns: Tuple[pd.DataFrame, pd.Series] -- (features,", "def __init__(self): \"\"\"Init function.\"\"\" self.src: Path = None self.labels: pd.Series = None self.metafeatures:", "for training. The following criteria is used. Metafeatures to: - normalize: Numerical columns", "\"\"\"Mixin to provide secondary featurization functionality.\"\"\" def featurize_secondary(self): \"\"\" Perform secondary featurization. Sequentially", "labels {pd.Series} -- Label corresponding to each metafeature. 
test_src {Path} -- Optional path", "training.\"\"\" raise NotImplementedError @abstractmethod def _create_raw_generator( self ) -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None,", "set file on disk. metafeatures {pd.DataFrame} -- Metafeatures extracted from the raw data", "raise NotImplementedError @abstractmethod def normalize_features(self): \"\"\"Normalize metafeatures for training.\"\"\" raise NotImplementedError @abstractmethod def", "-- (metafeatures_to_normalize, metafeatures_to_retain) \"\"\" idx_to_normalize: List[int] = [] idx_to_retain: List[int] = [] IGNORE_COLS", "extracted from the raw data set. Each metafeature row corresponds to a feature", "pd.Series = None self.metafeatures: pd.DataFrame = None self.test_src: Path = None self.test_labels: pd.Series", "to provide secondary featurization functionality.\"\"\" def featurize_secondary(self): \"\"\" Perform secondary featurization. Sequentially trigger", "MetaDataSet. Returns: Tuple[pd.DataFrame, pd.Series] -- (features, labels) tuple. \"\"\" return mds.drop(label_col, axis=1), mds[label_col]", ") self.metafeatures.columns = sec_feature_names if self.test_metafeatures is not None: sec_test_metafeatures = [ x.sec_test_metafeatures", "test metafeature row. scaler {RobustScaler} -- A scaler to handle normalize metafeatures before", "mean, std, num_nans, num_distincts) def __init__(self): \"\"\"Init function.\"\"\" self.src: Path = None self.labels:", "features_to_normalize, features_to_retain def _is_numeric(self, series: pd.Series) -> bool: return pd.api.types.is_numeric_dtype(series) @staticmethod def _split_features_and_labels(", "IGNORE_COLS: continue # Save columns that are either numeric or that have been", "to separate base and secondary metafeatures. Abstract methods: load_data_set -- Load the data", "Sequentially trigger each featurizer to extract secondary features. The extracted secondary metafeatures are", "set file on disk. 
This attribute applies more to the subclasses of MetaDataSetParser.", "function.\"\"\" self.src: Path = None self.labels: pd.Series = None self.metafeatures: pd.DataFrame = None", "def _create_raw_generator( self ) -> Generator[Tuple[str, Callable[[], pd.DataFrame]], None, None]: raise NotImplementedError def", "test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, ) else: featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self): \"\"\"Add secondary", "self.featurizers] self.metafeatures = pd.concat( [self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True, ) self.metafeatures.columns = sec_feature_names if", "Tuple[pd.DataFrame, pd.DataFrame] -- (metafeatures_to_normalize, metafeatures_to_retain) \"\"\" idx_to_normalize: List[int] = [] idx_to_retain: List[int] =", "A list of featurizers that performs secondary metafeaturizations. Class attributes: NUM_BASE_METAFEATURES {int} --", "the MetaDataSetFeaturizerViaLambda class functionality. \"\"\" NUM_BASE_METAFEATURES = ( 7 ) # Includes (total_val,", "data set file on disk. metafeatures {pd.DataFrame} -- Metafeatures extracted from the raw", "{pd.DataFrame} -- MetaDataSet. label_col {str} -- Column containing labels in the MetaDataSet. Returns:", "duplicated columnn names. Arguments: df {pd.DataFrame} -- Metafeatures dataframe. mark {str} -- Character", "None: sec_metafeatures = [x.sec_metafeatures for x in self.featurizers] self.metafeatures = pd.concat( [self.metafeatures, *sec_metafeatures],", "Path = None self.test_labels: pd.Series = None self.test_metafeatures: pd.DataFrame = None self.scaler: Type[RobustScaler]", "# Already represented as ngrams \"sample\", # Ignore sample columns which may be", "# Multiprocessing of raw dataframe(s) @abstractmethod def load_data_set(self): \"\"\"Load data set from source.\"\"\"", "labels. Arguments: mds {pd.DataFrame} -- MetaDataSet. 
label_col {str} -- Column containing labels in", "abc import ABC, abstractmethod from pathlib import Path from typing import Callable, Generator,", "-- Optional labels corresponding to each test metafeature row. scaler {RobustScaler} -- A", "{List} -- A list of featurizers that performs secondary metafeaturizations. Class attributes: NUM_BASE_METAFEATURES", "-- Returns a generator of raw data sets. This supports the MetaDataSetFeaturizerViaLambda class", "`normalized_distinct_rate` instead \"num_nans\", # Captured in `nan_rate` ) for i, col in enumerate(df.columns):", "NotImplementedError def _select_metafeatures( self, df: pd.DataFrame, mark: str = \"*\" ) -> Tuple[pd.DataFrame,", "for name in featurizer.sec_feature_names ] if self.metafeatures is not None: sec_metafeatures = [x.sec_metafeatures", "DataSetParser(ABC, FeaturizerMixin): \"\"\" Abstract base class to load and extract metafeatures from raw", "col[-1] == \"*\": idx_to_retain.append(i) elif self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i) features_to_normalize = df.iloc[:, idx_to_normalize] features_to_retain", "self.scaler: Type[RobustScaler] = None self.featurizers: List = [] self._multiprocess_raw_secondary: bool = False #", "self.test_metafeatures is not None: sec_feature_names = list(self.test_metafeatures) + [ name for featurizer in", "list of featurizers that performs secondary metafeaturizations. Class attributes: NUM_BASE_METAFEATURES {int} -- Number", "+ [ name for featurizer in self.featurizers for name in featurizer.sec_feature_names ] if", "features_to_retain def _is_numeric(self, series: pd.Series) -> bool: return pd.api.types.is_numeric_dtype(series) @staticmethod def _split_features_and_labels( mds:", "{pd.DataFrame} -- Optional metafeatures extracted from the test raw data set. 
test_labels {pd.Series}", "None self.metafeatures: pd.DataFrame = None self.test_src: Path = None self.test_labels: pd.Series = None", "# Intent prediction should not be based on # data points \"num_distincts\", #", "\"num_nans\", # Captured in `nan_rate` ) for i, col in enumerate(df.columns): if col", "separate base and secondary metafeatures. Abstract methods: load_data_set -- Load the data set", "featurize_base(self): \"\"\"Featurize base metafeatures.\"\"\" raise NotImplementedError @abstractmethod def normalize_features(self): \"\"\"Normalize metafeatures for training.\"\"\"", "and secondary metafeatures. Abstract methods: load_data_set -- Load the data set and perform", "set. test_labels {pd.Series} -- Optional labels corresponding to each test metafeature row. scaler", "feature names if self.metafeatures is not None: sec_feature_names = list(self.metafeatures) + [ name", "normalize metafeatures before serving them for training. featurizers: {List} -- A list of", "self.metafeatures: pd.DataFrame = None self.test_src: Path = None self.test_labels: pd.Series = None self.test_metafeatures:", "type int \"total_val\", # Intent prediction should not be based on # data", "have been marked # into appropriate groups if col[-1] == \"*\": idx_to_retain.append(i) elif", "should not be normlized but retained for training. Returns: Tuple[pd.DataFrame, pd.DataFrame] -- (metafeatures_to_normalize,", "supports the MetaDataSetFeaturizerViaLambda class functionality. \"\"\" NUM_BASE_METAFEATURES = ( 7 ) # Includes", "features_to_retain = df.iloc[:, idx_to_retain] return features_to_normalize, features_to_retain def _is_numeric(self, series: pd.Series) -> bool:", "- normalize: Numerical columns - not normalize but retain for training: Features whose", "\"sample\", # Ignore sample columns which may be of type int \"total_val\", #", "extract metafeatures from raw data sets. FeaturizerMixin provides the `.featurize` method. Instance attributes:", "test raw data set. 
test_labels {pd.Series} -- Optional labels corresponding to each test", "are dropped. Note: Columns are tracked by indices instead of names to avoid", "if type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, ) else: featurizer.featurize( meta_df=self.metafeatures,", "on the metafeatures and test metafeatures (if provided). _create_raw_generator -- Returns a generator", "the `metafeature` and `test_metafeature` attributes of the DataSetParser subclass instance. \"\"\" for featurizer", "to the `metafeature` and `test_metafeature` attributes of the DataSetParser subclass instance. \"\"\" for", "-- Optional path to test raw data set file on disk. This attribute", "= [] self._multiprocess_raw_secondary: bool = False # Multiprocessing of raw dataframe(s) @abstractmethod def", "IGNORE_COLS = ( \"attribute_name\", # Already represented as ngrams \"sample\", # Ignore sample", "Path = None self.labels: pd.Series = None self.metafeatures: pd.DataFrame = None self.test_src: Path", "int \"total_val\", # Intent prediction should not be based on # data points", "training. featurizers: {List} -- A list of featurizers that performs secondary metafeaturizations. Class", "metafeatures will then be collected and appended column-wise to the `metafeature` and `test_metafeature`", "= None self.test_labels: pd.Series = None self.test_metafeatures: pd.DataFrame = None self.scaler: Type[RobustScaler] =", "generator of raw data sets. This supports the MetaDataSetFeaturizerViaLambda class functionality. \"\"\" NUM_BASE_METAFEATURES", "None self.test_labels: pd.Series = None self.test_metafeatures: pd.DataFrame = None self.scaler: Type[RobustScaler] = None", "Used to separate base and secondary metafeatures. Abstract methods: load_data_set -- Load the", "and test metafeatures (if provided). 
_create_raw_generator -- Returns a generator of raw data", "secondary features to the training and test metafeature attributes.\"\"\" # Get secondary feature", "training and test metafeature attributes.\"\"\" # Get secondary feature names if self.metafeatures is", "load and extract metafeatures from raw data sets. FeaturizerMixin provides the `.featurize` method.", "data set. labels {pd.Series} -- Label corresponding to each metafeature. test_src {Path} --", "from pathlib import Path from typing import Callable, Generator, List, Tuple, Type import", "self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i) features_to_normalize = df.iloc[:, idx_to_normalize] features_to_retain = df.iloc[:, idx_to_retain] return features_to_normalize,", "`metafeature` and `test_metafeature` attributes of the DataSetParser subclass instance. \"\"\" for featurizer in", "Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Select metafeatures to normalize and to retain for training. The", "Use `normalized_distinct_rate` instead \"num_nans\", # Captured in `nan_rate` ) for i, col in", "Split features and labels. Arguments: mds {pd.DataFrame} -- MetaDataSet. label_col {str} -- Column", "= list(self.metafeatures) + [ name for featurizer in self.featurizers for name in featurizer.sec_feature_names", "row corresponds to a feature column in the raw data set. labels {pd.Series}", "def load_data_set(self): \"\"\"Load data set from source.\"\"\" raise NotImplementedError @abstractmethod def featurize_base(self): \"\"\"Featurize", "features to the training and test metafeature attributes.\"\"\" # Get secondary feature names", "MetaDataSetParser. test_metafeatures {pd.DataFrame} -- Optional metafeatures extracted from the test raw data set.", "training. 
Returns: Tuple[pd.DataFrame, pd.DataFrame] -- (metafeatures_to_normalize, metafeatures_to_retain) \"\"\" idx_to_normalize: List[int] = [] idx_to_retain:", ") for i, col in enumerate(df.columns): if col in IGNORE_COLS: continue # Save", "on # data points \"num_distincts\", # Use `normalized_distinct_rate` instead \"num_nans\", # Captured in", "be collected and appended column-wise to the `metafeature` and `test_metafeature` attributes of the", "else: featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self): \"\"\"Add secondary features to the", "pd.DataFrame]: \"\"\" Select metafeatures to normalize and to retain for training. The following", "for the DataSetParser ABC and FeaturizerMixin.\"\"\" from abc import ABC, abstractmethod from pathlib", "in self.featurizers] self.metafeatures = pd.concat( [self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True, ) self.metafeatures.columns = sec_feature_names", "when there are duplicated columnn names. Arguments: df {pd.DataFrame} -- Metafeatures dataframe. mark", "Arguments: mds {pd.DataFrame} -- MetaDataSet. label_col {str} -- Column containing labels in the", "Features whose title ends with `mark`. Remainder metafeatures are dropped. Note: Columns are", "column-wise to the `metafeature` and `test_metafeature` attributes of the DataSetParser subclass instance. \"\"\"", "Abstract methods: load_data_set -- Load the data set and perform necessarily cleaning and", "in each featurizer's `sec_metafeatures` and `sec_test_metafeatures` attributes. These extracted metafeatures will then be", "col in IGNORE_COLS: continue # Save columns that are either numeric or that", "data sets. This supports the MetaDataSetFeaturizerViaLambda class functionality. \"\"\" NUM_BASE_METAFEATURES = ( 7", "there are duplicated columnn names. Arguments: df {pd.DataFrame} -- Metafeatures dataframe. 
mark {str}", "for featurizer in self.featurizers for name in featurizer.sec_feature_names ] if self.metafeatures is not", "None: sec_test_metafeatures = [ x.sec_test_metafeatures for x in self.featurizers ] self.test_metafeatures = pd.concat(", "idx_to_normalize] features_to_retain = df.iloc[:, idx_to_retain] return features_to_normalize, features_to_retain def _is_numeric(self, series: pd.Series) ->", "points \"num_distincts\", # Use `normalized_distinct_rate` instead \"num_nans\", # Captured in `nan_rate` ) for", "Note: Columns are tracked by indices instead of names to avoid problems when", "columns - not normalize but retain for training: Features whose title ends with", "names to avoid problems when there are duplicated columnn names. Arguments: df {pd.DataFrame}", "criteria is used. Metafeatures to: - normalize: Numerical columns - not normalize but", "raw dataframe(s) @abstractmethod def load_data_set(self): \"\"\"Load data set from source.\"\"\" raise NotImplementedError @abstractmethod", "to: - normalize: Numerical columns - not normalize but retain for training: Features", "scaler {RobustScaler} -- A scaler to handle normalize metafeatures before serving them for", "into appropriate groups if col[-1] == \"*\": idx_to_retain.append(i) elif self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i) features_to_normalize", "to the subclasses of MetaDataSetParser. test_metafeatures {pd.DataFrame} -- Optional metafeatures extracted from the", "test_labels {pd.Series} -- Optional labels corresponding to each test metafeature row. scaler {RobustScaler}", "@abstractmethod def normalize_features(self): \"\"\"Normalize metafeatures for training.\"\"\" raise NotImplementedError @abstractmethod def _create_raw_generator( self", "{Path} -- Optional path to test raw data set file on disk. 
This", "sec_feature_names if self.test_metafeatures is not None: sec_test_metafeatures = [ x.sec_test_metafeatures for x in", "pd.api.types.is_numeric_dtype(series) @staticmethod def _split_features_and_labels( mds: pd.DataFrame, label_col: str ) -> Tuple[pd.DataFrame, pd.Series]: \"\"\"", "{RobustScaler} -- A scaler to handle normalize metafeatures before serving them for training.", "featurization functionality.\"\"\" def featurize_secondary(self): \"\"\" Perform secondary featurization. Sequentially trigger each featurizer to", "] self.test_metafeatures = pd.concat( [self.test_metafeatures, *sec_test_metafeatures], axis=1, ignore_index=True, ) self.test_metafeatures.columns = sec_feature_names class", "sec_feature_names = list(self.test_metafeatures) + [ name for featurizer in self.featurizers for name in", "secondary featurization. Sequentially trigger each featurizer to extract secondary features. The extracted secondary", "pd from sklearn.preprocessing import RobustScaler class FeaturizerMixin: \"\"\"Mixin to provide secondary featurization functionality.\"\"\"", "functionality. \"\"\" NUM_BASE_METAFEATURES = ( 7 ) # Includes (total_val, min, max, mean,", "in self.featurizers ] self.test_metafeatures = pd.concat( [self.test_metafeatures, *sec_test_metafeatures], axis=1, ignore_index=True, ) self.test_metafeatures.columns =", "for featurizer in self.featurizers: if type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary,", "RobustScaler class FeaturizerMixin: \"\"\"Mixin to provide secondary featurization functionality.\"\"\" def featurize_secondary(self): \"\"\" Perform", "featurization. Sequentially trigger each featurizer to extract secondary features. The extracted secondary metafeatures", "normalization on the metafeatures and test metafeatures (if provided). 
_create_raw_generator -- Returns a", "self.featurizers for name in featurizer.sec_feature_names ] elif self.test_metafeatures is not None: sec_feature_names =", "= None self.labels: pd.Series = None self.metafeatures: pd.DataFrame = None self.test_src: Path =", "is not None: sec_test_metafeatures = [ x.sec_test_metafeatures for x in self.featurizers ] self.test_metafeatures", "A scaler to handle normalize metafeatures before serving them for training. featurizers: {List}", "and parsing. featurize_base -- Featurize base metafeatures. normalize_features -- Performs normalization on the", "# Save columns that are either numeric or that have been marked #", "@staticmethod def _split_features_and_labels( mds: pd.DataFrame, label_col: str ) -> Tuple[pd.DataFrame, pd.Series]: \"\"\" Split", "necessarily cleaning and parsing. featurize_base -- Featurize base metafeatures. normalize_features -- Performs normalization", "-- Load the data set and perform necessarily cleaning and parsing. featurize_base --", "training: Features whose title ends with `mark`. Remainder metafeatures are dropped. Note: Columns", "== \"*\": idx_to_retain.append(i) elif self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i) features_to_normalize = df.iloc[:, idx_to_normalize] features_to_retain =", "@abstractmethod def load_data_set(self): \"\"\"Load data set from source.\"\"\" raise NotImplementedError @abstractmethod def featurize_base(self):", "metafeatures extracted from the test raw data set. test_labels {pd.Series} -- Optional labels", "featurizer.sec_feature_names ] elif self.test_metafeatures is not None: sec_feature_names = list(self.test_metafeatures) + [ name", "FeaturizerMixin provides the `.featurize` method. Instance attributes: src {Path} -- Path to data", "raw data sets. FeaturizerMixin provides the `.featurize` method. Instance attributes: src {Path} --", "secondary metafeatures are stored in each featurizer's `sec_metafeatures` and `sec_test_metafeatures` attributes. 
These extracted", "groups if col[-1] == \"*\": idx_to_retain.append(i) elif self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i) features_to_normalize = df.iloc[:,", "np import pandas as pd from sklearn.preprocessing import RobustScaler class FeaturizerMixin: \"\"\"Mixin to", "\"\"\" idx_to_normalize: List[int] = [] idx_to_retain: List[int] = [] IGNORE_COLS = ( \"attribute_name\",", "= [] idx_to_retain: List[int] = [] IGNORE_COLS = ( \"attribute_name\", # Already represented", "data set file on disk. This attribute applies more to the subclasses of", "Save columns that are either numeric or that have been marked # into", "str ) -> Tuple[pd.DataFrame, pd.Series]: \"\"\" Split features and labels. Arguments: mds {pd.DataFrame}", "# Use `normalized_distinct_rate` instead \"num_nans\", # Captured in `nan_rate` ) for i, col", "pd.concat( [self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True, ) self.metafeatures.columns = sec_feature_names if self.test_metafeatures is not", "[x.sec_metafeatures for x in self.featurizers] self.metafeatures = pd.concat( [self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True, )", "be of type int \"total_val\", # Intent prediction should not be based on", "Callable[[], pd.DataFrame]], None, None]: raise NotImplementedError def _select_metafeatures( self, df: pd.DataFrame, mark: str", "attributes.\"\"\" # Get secondary feature names if self.metafeatures is not None: sec_feature_names =", "\"\"\"Add secondary features to the training and test metafeature attributes.\"\"\" # Get secondary", "def _is_numeric(self, series: pd.Series) -> bool: return pd.api.types.is_numeric_dtype(series) @staticmethod def _split_features_and_labels( mds: pd.DataFrame,", "Metafeatures to: - normalize: Numerical columns - not normalize but retain for training:", "import Callable, Generator, List, Tuple, Type import numpy as np import pandas as", "-> bool: return pd.api.types.is_numeric_dtype(series) @staticmethod def 
_split_features_and_labels( mds: pd.DataFrame, label_col: str ) ->", "mds {pd.DataFrame} -- MetaDataSet. label_col {str} -- Column containing labels in the MetaDataSet.", "metafeatures are stored in each featurizer's `sec_metafeatures` and `sec_test_metafeatures` attributes. These extracted metafeatures", "None, None]: raise NotImplementedError def _select_metafeatures( self, df: pd.DataFrame, mark: str = \"*\"", "self, df: pd.DataFrame, mark: str = \"*\" ) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Select", "The following criteria is used. Metafeatures to: - normalize: Numerical columns - not", "# data points \"num_distincts\", # Use `normalized_distinct_rate` instead \"num_nans\", # Captured in `nan_rate`", "Callable, Generator, List, Tuple, Type import numpy as np import pandas as pd", "corresponding to each metafeature. test_src {Path} -- Optional path to test raw data", "enumerate(df.columns): if col in IGNORE_COLS: continue # Save columns that are either numeric", "disk. metafeatures {pd.DataFrame} -- Metafeatures extracted from the raw data set. Each metafeature", "import Path from typing import Callable, Generator, List, Tuple, Type import numpy as", "\"num_distincts\", # Use `normalized_distinct_rate` instead \"num_nans\", # Captured in `nan_rate` ) for i,", "following criteria is used. Metafeatures to: - normalize: Numerical columns - not normalize", "if self.metafeatures is not None: sec_metafeatures = [x.sec_metafeatures for x in self.featurizers] self.metafeatures", "src {Path} -- Path to data set file on disk. metafeatures {pd.DataFrame} --", "\"\"\"Load data set from source.\"\"\" raise NotImplementedError @abstractmethod def featurize_base(self): \"\"\"Featurize base metafeatures.\"\"\"", "label_col: str ) -> Tuple[pd.DataFrame, pd.Series]: \"\"\" Split features and labels. 
Arguments: mds", "normalize_features(self): \"\"\"Normalize metafeatures for training.\"\"\" raise NotImplementedError @abstractmethod def _create_raw_generator( self ) ->", "test raw data set file on disk. This attribute applies more to the", "for i, col in enumerate(df.columns): if col in IGNORE_COLS: continue # Save columns", "None: sec_feature_names = list(self.test_metafeatures) + [ name for featurizer in self.featurizers for name", "metafeatures. Used to separate base and secondary metafeatures. Abstract methods: load_data_set -- Load", "i]): idx_to_normalize.append(i) features_to_normalize = df.iloc[:, idx_to_normalize] features_to_retain = df.iloc[:, idx_to_retain] return features_to_normalize, features_to_retain", "self.test_metafeatures.columns = sec_feature_names class DataSetParser(ABC, FeaturizerMixin): \"\"\" Abstract base class to load and", "def normalize_features(self): \"\"\"Normalize metafeatures for training.\"\"\" raise NotImplementedError @abstractmethod def _create_raw_generator( self )", "extracted metafeatures will then be collected and appended column-wise to the `metafeature` and", "{pd.DataFrame} -- Metafeatures extracted from the raw data set. Each metafeature row corresponds", "in self.featurizers: if type(featurizer).__name__ == \"RawDataSetFeaturizerViaLambda\": featurizer.featurize( self._create_raw_generator(), keys=self.metafeatures, test_keys=self.test_metafeatures, multiprocess=self._multiprocess_raw_secondary, ) else:", "names if self.metafeatures is not None: sec_feature_names = list(self.metafeatures) + [ name for", "from sklearn.preprocessing import RobustScaler class FeaturizerMixin: \"\"\"Mixin to provide secondary featurization functionality.\"\"\" def", "and `test_metafeature` attributes of the DataSetParser subclass instance. 
\"\"\" for featurizer in self.featurizers:", "metafeatures for training.\"\"\" raise NotImplementedError @abstractmethod def _create_raw_generator( self ) -> Generator[Tuple[str, Callable[[],", "Load the data set and perform necessarily cleaning and parsing. featurize_base -- Featurize", "for training.\"\"\" raise NotImplementedError @abstractmethod def _create_raw_generator( self ) -> Generator[Tuple[str, Callable[[], pd.DataFrame]],", "NotImplementedError @abstractmethod def featurize_base(self): \"\"\"Featurize base metafeatures.\"\"\" raise NotImplementedError @abstractmethod def normalize_features(self): \"\"\"Normalize", "set. Each metafeature row corresponds to a feature column in the raw data", "metafeatures to normalize and to retain for training. The following criteria is used.", "Abstract base class to load and extract metafeatures from raw data sets. FeaturizerMixin", "to a feature column in the raw data set. labels {pd.Series} -- Label", "self.metafeatures is not None: sec_feature_names = list(self.metafeatures) + [ name for featurizer in", "self._multiprocess_raw_secondary: bool = False # Multiprocessing of raw dataframe(s) @abstractmethod def load_data_set(self): \"\"\"Load", "Get secondary feature names if self.metafeatures is not None: sec_feature_names = list(self.metafeatures) +", "containing labels in the MetaDataSet. Returns: Tuple[pd.DataFrame, pd.Series] -- (features, labels) tuple. \"\"\"", "will then be collected and appended column-wise to the `metafeature` and `test_metafeature` attributes", "are stored in each featurizer's `sec_metafeatures` and `sec_test_metafeatures` attributes. These extracted metafeatures will", "featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self): \"\"\"Add secondary features to the training", "serving them for training. 
featurizers: {List} -- A list of featurizers that performs", "Tuple[pd.DataFrame, pd.Series]: \"\"\" Split features and labels. Arguments: mds {pd.DataFrame} -- MetaDataSet. label_col", "secondary metafeatures. Abstract methods: load_data_set -- Load the data set and perform necessarily", "def featurize_secondary(self): \"\"\" Perform secondary featurization. Sequentially trigger each featurizer to extract secondary", "names of columns that should not be normlized but retained for training. Returns:", "df {pd.DataFrame} -- Metafeatures dataframe. mark {str} -- Character to append to names", "that should not be normlized but retained for training. Returns: Tuple[pd.DataFrame, pd.DataFrame] --", "in enumerate(df.columns): if col in IGNORE_COLS: continue # Save columns that are either", "sec_feature_names = list(self.metafeatures) + [ name for featurizer in self.featurizers for name in", "= None self.featurizers: List = [] self._multiprocess_raw_secondary: bool = False # Multiprocessing of", "# Ignore sample columns which may be of type int \"total_val\", # Intent", "that are either numeric or that have been marked # into appropriate groups", "*sec_test_metafeatures], axis=1, ignore_index=True, ) self.test_metafeatures.columns = sec_feature_names class DataSetParser(ABC, FeaturizerMixin): \"\"\" Abstract base", "Metafeatures dataframe. mark {str} -- Character to append to names of columns that", "= ( \"attribute_name\", # Already represented as ngrams \"sample\", # Ignore sample columns", "= sec_feature_names if self.test_metafeatures is not None: sec_test_metafeatures = [ x.sec_test_metafeatures for x", "ABC and FeaturizerMixin.\"\"\" from abc import ABC, abstractmethod from pathlib import Path from", "but retained for training. Returns: Tuple[pd.DataFrame, pd.DataFrame] -- (metafeatures_to_normalize, metafeatures_to_retain) \"\"\" idx_to_normalize: List[int]", "metafeatures are dropped. 
Note: Columns are tracked by indices instead of names to", "the training and test metafeature attributes.\"\"\" # Get secondary feature names if self.metafeatures", "self.featurizers for name in featurizer.sec_feature_names ] if self.metafeatures is not None: sec_metafeatures =", "from the raw data set. Each metafeature row corresponds to a feature column", "as pd from sklearn.preprocessing import RobustScaler class FeaturizerMixin: \"\"\"Mixin to provide secondary featurization", "the DataSetParser ABC and FeaturizerMixin.\"\"\" from abc import ABC, abstractmethod from pathlib import", "not None: sec_test_metafeatures = [ x.sec_test_metafeatures for x in self.featurizers ] self.test_metafeatures =", "NotImplementedError @abstractmethod def normalize_features(self): \"\"\"Normalize metafeatures for training.\"\"\" raise NotImplementedError @abstractmethod def _create_raw_generator(", "raise NotImplementedError def _select_metafeatures( self, df: pd.DataFrame, mark: str = \"*\" ) ->", "attributes of the DataSetParser subclass instance. \"\"\" for featurizer in self.featurizers: if type(featurizer).__name__", "of base metafeatures. Used to separate base and secondary metafeatures. Abstract methods: load_data_set", "x in self.featurizers ] self.test_metafeatures = pd.concat( [self.test_metafeatures, *sec_test_metafeatures], axis=1, ignore_index=True, ) self.test_metafeatures.columns", "mark: str = \"*\" ) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Select metafeatures to normalize", "__add_secondary_metafeatures(self): \"\"\"Add secondary features to the training and test metafeature attributes.\"\"\" # Get", "pd.DataFrame]], None, None]: raise NotImplementedError def _select_metafeatures( self, df: pd.DataFrame, mark: str =", "retain for training. The following criteria is used. 
Metafeatures to: - normalize: Numerical", "pd.DataFrame] -- (metafeatures_to_normalize, metafeatures_to_retain) \"\"\" idx_to_normalize: List[int] = [] idx_to_retain: List[int] = []", "dataframe. mark {str} -- Character to append to names of columns that should", "`test_metafeature` attributes of the DataSetParser subclass instance. \"\"\" for featurizer in self.featurizers: if", "= df.iloc[:, idx_to_normalize] features_to_retain = df.iloc[:, idx_to_retain] return features_to_normalize, features_to_retain def _is_numeric(self, series:", "Label corresponding to each metafeature. test_src {Path} -- Optional path to test raw", "multiprocess=self._multiprocess_raw_secondary, ) else: featurizer.featurize( meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self): \"\"\"Add secondary features", "Returns a generator of raw data sets. This supports the MetaDataSetFeaturizerViaLambda class functionality.", "featurize_base -- Featurize base metafeatures. normalize_features -- Performs normalization on the metafeatures and", "= False # Multiprocessing of raw dataframe(s) @abstractmethod def load_data_set(self): \"\"\"Load data set", "either numeric or that have been marked # into appropriate groups if col[-1]", "meta_df=self.metafeatures, test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self): \"\"\"Add secondary features to the training and", "Captured in `nan_rate` ) for i, col in enumerate(df.columns): if col in IGNORE_COLS:", "df.iloc[:, idx_to_normalize] features_to_retain = df.iloc[:, idx_to_retain] return features_to_normalize, features_to_retain def _is_numeric(self, series: pd.Series)", "features. 
The extracted secondary metafeatures are stored in each featurizer's `sec_metafeatures` and `sec_test_metafeatures`", "ABC, abstractmethod from pathlib import Path from typing import Callable, Generator, List, Tuple,", "data set and perform necessarily cleaning and parsing. featurize_base -- Featurize base metafeatures.", "Tuple, Type import numpy as np import pandas as pd from sklearn.preprocessing import", "ignore_index=True, ) self.metafeatures.columns = sec_feature_names if self.test_metafeatures is not None: sec_test_metafeatures = [", "ends with `mark`. Remainder metafeatures are dropped. Note: Columns are tracked by indices", "x.sec_test_metafeatures for x in self.featurizers ] self.test_metafeatures = pd.concat( [self.test_metafeatures, *sec_test_metafeatures], axis=1, ignore_index=True,", "None self.scaler: Type[RobustScaler] = None self.featurizers: List = [] self._multiprocess_raw_secondary: bool = False", "secondary metafeaturizations. Class attributes: NUM_BASE_METAFEATURES {int} -- Number of base metafeatures. Used to", "\"*\": idx_to_retain.append(i) elif self._is_numeric(df.iloc[:, i]): idx_to_normalize.append(i) features_to_normalize = df.iloc[:, idx_to_normalize] features_to_retain = df.iloc[:,", "as np import pandas as pd from sklearn.preprocessing import RobustScaler class FeaturizerMixin: \"\"\"Mixin", "[ name for featurizer in self.featurizers for name in featurizer.sec_feature_names ] elif self.test_metafeatures", "featurizers that performs secondary metafeaturizations. Class attributes: NUM_BASE_METAFEATURES {int} -- Number of base", "= None self.scaler: Type[RobustScaler] = None self.featurizers: List = [] self._multiprocess_raw_secondary: bool =", "featurize_secondary(self): \"\"\" Perform secondary featurization. 
Sequentially trigger each featurizer to extract secondary features.", "self.test_metafeatures = pd.concat( [self.test_metafeatures, *sec_test_metafeatures], axis=1, ignore_index=True, ) self.test_metafeatures.columns = sec_feature_names class DataSetParser(ABC,", "to test raw data set file on disk. This attribute applies more to", "methods: load_data_set -- Load the data set and perform necessarily cleaning and parsing.", "base and secondary metafeatures. Abstract methods: load_data_set -- Load the data set and", "MetaDataSet. label_col {str} -- Column containing labels in the MetaDataSet. Returns: Tuple[pd.DataFrame, pd.Series]", "Multiprocessing of raw dataframe(s) @abstractmethod def load_data_set(self): \"\"\"Load data set from source.\"\"\" raise", "axis=1, ignore_index=True, ) self.metafeatures.columns = sec_feature_names if self.test_metafeatures is not None: sec_test_metafeatures =", "[self.test_metafeatures, *sec_test_metafeatures], axis=1, ignore_index=True, ) self.test_metafeatures.columns = sec_feature_names class DataSetParser(ABC, FeaturizerMixin): \"\"\" Abstract", "\"\"\" Split features and labels. Arguments: mds {pd.DataFrame} -- MetaDataSet. label_col {str} --", "the `.featurize` method. Instance attributes: src {Path} -- Path to data set file", "training. The following criteria is used. Metafeatures to: - normalize: Numerical columns -", "idx_to_normalize.append(i) features_to_normalize = df.iloc[:, idx_to_normalize] features_to_retain = df.iloc[:, idx_to_retain] return features_to_normalize, features_to_retain def", "x in self.featurizers] self.metafeatures = pd.concat( [self.metafeatures, *sec_metafeatures], axis=1, ignore_index=True, ) self.metafeatures.columns =", "class FeaturizerMixin: \"\"\"Mixin to provide secondary featurization functionality.\"\"\" def featurize_secondary(self): \"\"\" Perform secondary", "featurizers: {List} -- A list of featurizers that performs secondary metafeaturizations. 
Class attributes:", "test_meta_df=self.test_metafeatures, ) self.__add_secondary_metafeatures() def __add_secondary_metafeatures(self): \"\"\"Add secondary features to the training and test", "Number of base metafeatures. Used to separate base and secondary metafeatures. Abstract methods:", "normalize and to retain for training. The following criteria is used. Metafeatures to:", "ignore_index=True, ) self.test_metafeatures.columns = sec_feature_names class DataSetParser(ABC, FeaturizerMixin): \"\"\" Abstract base class to", "\"\"\"Class definition for the DataSetParser ABC and FeaturizerMixin.\"\"\" from abc import ABC, abstractmethod", "abstractmethod from pathlib import Path from typing import Callable, Generator, List, Tuple, Type" ]
[ "torch.sum(x, dim=-2, keepdim=False) # (..., dim, 6, 8) -> (..., dim, 8) x", "SeqLen, 24) attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen, 1) movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding,", "= nn.Embedding(value[0], value[1], padding_idx=0) else: self.embeddingGroups[key] = nn.Embedding(value[0], value[1]) self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev)", "dim=-1) # (..., dim, 1) -> (.... dim, E) x = torch.mul(x, attentionWeights)", "SeqLen, 24) product = torch.mul(x, target) # (B, SeqLen, 24) # product =", "# (B, 24) movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B, SeqLen, 24) attentionWeights", "dim, 8) elif self.mod == 'attention': attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (...,", "self.forward(m1,m2,a1,a2) loss = lossFunc(preds[:, 1], label.float(), reduction='mean') + self.regLoss() return loss def predict(self,", "self.dev = device self.MLP = MLP(MLPInfo, activation, PReLuInit, isUseBN, dropoutRate) # MLP self.output", "attention activation unit self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling layer self.to(self.dev) def", "self.MLP(x) x = F.softmax(self.output(x), dim=1) return x # (B, 2) def regLoss(self): totalRegLoss", ":, 1:]) # (B, SeqLen, 6, 8) movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:]", "torch.autograd import Variable class MLP(nn.Module): def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001):", "# MLP self.output = nn.Linear(MLPInfo[-1], 2) # output layer self.to(self.dev) def forward(self, m1,m2,a1,a2):", "product = F.softmax(product, dim=1) return x # (B, SeqLen, 1) class Dice(nn.Module): def", "(B, SeqLen, 8) #print(movieGenreFeat) #input() adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16) adsGenreFeat", "= 
movieFeature[movieIdSequence] adsFeat = movieFeature[ads] movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B, SeqLen,", "= torch.repeat_interleave(target, x.shape[-2], dim=1) # (B, SeqLen, 24) product = torch.mul(x, target) #", "isUseBN=False, dropoutRate=0.0, initStd=initStd) self.output = nn.Linear(attMLPInfo[-1], 1) def forward(self, x, target): target =", "1) def forward(self, x, target): target = torch.unsqueeze(target, dim=1) # (B, 1, 24)", "= self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0) # (B, 8) adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat),", "Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self, x): for layer in self.multiLayerPerceptron: x = layer(x)", "24) target = torch.repeat_interleave(target, x.shape[-2], dim=1) # (B, SeqLen, 24) product = torch.mul(x,", "self.eps = torch.FloatTensor([1e-8]).to(self.dev) def forward(self, x, mask): if self.mod == 'mean': length =", "SeqLen, 1) movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24) return movieSequenceEmbedding,adsEmbedding def forward_FR(self,", "keepdim=True) # (..., dim, 6) -> (...,dim, 1) x = torch.sum(x, dim=-2, keepdim=False)", "= l2RegEmbedding self.dev = device self.MLP = MLP(MLPInfo, activation, PReLuInit, isUseBN, dropoutRate) #", "8) adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B, 24) movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat),", "x = torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1) x = self.MLP(x) x = F.softmax(self.output(x), dim=1) return", "adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0) # (B, 8) adsEmbedding = torch.cat((adsIdFeat,", "#print(movieGenreFeat) #input() adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16) adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:])", "torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1) x = 
self.MLP(x) x = F.softmax(self.output(x), dim=1) return x #", "dim, E) -> (..., E) else: pass return x class AttentionActivationUnit(nn.Module): def __init__(self,", "target = torch.repeat_interleave(target, x.shape[-2], dim=1) # (B, SeqLen, 24) product = torch.mul(x, target)", "length.type(torch.float32) + self.eps) # (..., dim, 8) elif self.mod == 'attention': attentionWeights =", "torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B, SeqLen, 24) attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B,", "x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, 6, 8) -> (..., dim,", "activation, PReLuInit, initStd) # attention activation unit self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) # sequence", "x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, E) -> (..., E) else:", "import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable class", "dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(Bottom, self).__init__() self.dev = device self.embeddingGroups = nn.ModuleDict() # embedding", "i in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1])) if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1])) actiFun", "= torch.FloatTensor([1e-8]).to(self.dev) def forward(self, x, mask): if self.mod == 'mean': length = torch.sum(mask.type(torch.float32),", "movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out class DIN(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo,", "init=PReLuInit) if activation == 'PReLU' else Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self, x): for", "movieFeatSequence = movieFeature[movieIdSequence] adsFeat = movieFeature[ads] movieIdFeat = 
self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B,", "# attention activation unit self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling layer self.to(self.dev)", "embeddingGroupInfo.items(): if key == 'MovieId' or key == 'Genre': self.embeddingGroups[key] = nn.Embedding(value[0], value[1],", "MLP(nn.Module): def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001): super(MLP, self).__init__() self.multiLayerPerceptron =", "'weight' in name: totalRegLoss += torch.sum(self.l2RegEmbeddding * param*param) return totalRegLoss def loss(self, m1,m2,a1,a2,label,", "# (..., dim, 6, 8) -> (..., dim, 8) x = torch.div(x, length.type(torch.float32)", "= torch.cat((x, target, product), dim=2) # (B, SeqLen, 72) x = self.MLP(x) x", "nn.Linear(MLPInfo[-1], 2) # output layer self.to(self.dev) def forward(self, m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2 #", "x, mask): if self.mod == 'mean': length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (...,", "def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit, self).__init__() self.MLP = MLP(attMLPInfo, activation, PReLuInit,", "F.softmax(product, dim=1) return x # (B, SeqLen, 1) class Dice(nn.Module): def __init__(self): super(Dice,", "isUseBN=True, dropoutRate=0.0, initStd=0.0001): super(MLP, self).__init__() self.multiLayerPerceptron = nn.ModuleList() # MLP for i in", "movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out class DIN(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo,", "E) else: pass return x class AttentionActivationUnit(nn.Module): def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001):", "dropoutRate) # MLP self.output = 
nn.Linear(MLPInfo[-1], 2) # output layer self.to(self.dev) def forward(self,", "x = layer(x) return x class Bottom(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU',", "(..., dim, 6) -> (...,dim, 1) x = torch.sum(x, dim=-2, keepdim=False) # (...,", "nn.ModuleDict() # embedding group for key, value in embeddingGroupInfo.items(): if key == 'MovieId'", "super(SequencePoolingLayer, self).__init__() self.mod = mod self.dev = device self.eps = torch.FloatTensor([1e-8]).to(self.dev) def forward(self,", "F from torch.autograd import Variable class MLP(nn.Module): def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True,", "if activation == 'PReLU' else Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self, x): for layer", "1:]) # (B, 6, 8) adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0) #", "forward(self, x): for layer in self.multiLayerPerceptron: x = layer(x) return x class Bottom(nn.Module):", "(..., dim, 1) -> (.... 
dim, E) x = torch.mul(x, attentionWeights) # (...,", "x = torch.mul(x, attentionWeights) # (..., dim, E) x = torch.sum(x, dim=-2, keepdim=False)", "adsEmbedding) # (B, SeqLen, 1) movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24) return", "return x class Bottom(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6,", "= nn.Linear(attMLPInfo[-1], 1) def forward(self, x, target): target = torch.unsqueeze(target, dim=1) # (B,", "x.shape[-2], dim=1) # (B, SeqLen, 24) product = torch.mul(x, target) # (B, SeqLen,", "group for key, value in embeddingGroupInfo.items(): if key == 'MovieId' or key ==", ":, 1:] > 0) # (B, SeqLen, 8) #print(movieGenreFeat) #input() adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:,", "x = self.output(x) # product = torch.sum(product, dim=-1, keepdim=True) # product = F.softmax(product,", "= F.softmax(product, dim=1) return x # (B, SeqLen, 1) class Dice(nn.Module): def __init__(self):", "+ self.eps) # (..., dim, 8) elif self.mod == 'attention': attentionWeights = torch.repeat_interleave(mask,", "SeqLen, 24) # product = torch.sum(product, dim=-1, keepdim=True) # (B, SeqLen, 1) x", "torch.cat((x, target, product), dim=2) # (B, SeqLen, 72) x = self.MLP(x) x =", "= AttentionActivationUnit(attMLPInfo, activation, PReLuInit, initStd) # attention activation unit self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev)", "PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(Bottom, self).__init__() self.dev = device self.embeddingGroups =", "MLPInfo[i + 1])) if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1])) actiFun = nn.PReLU(1, init=PReLuInit) if", "self.output = nn.Linear(attMLPInfo[-1], 1) def forward(self, x, target): target = torch.unsqueeze(target, dim=1) #", "# (B, SeqLen, 24) # product 
= torch.sum(product, dim=-1, keepdim=True) # (B, SeqLen,", "# (B, 1, 24) target = torch.repeat_interleave(target, x.shape[-2], dim=1) # (B, SeqLen, 24)", "== 'MovieId' or key == 'Genre': self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0) else: self.embeddingGroups[key]", "+ 1])) actiFun = nn.PReLU(1, init=PReLuInit) if activation == 'PReLU' else Dice() self.multiLayerPerceptron.append(actiFun)", "forward(self, movieIdSequence,ads, movieFeature): movieFeatSequence = movieFeature[movieIdSequence] adsFeat = movieFeature[ads] movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:, :,", "dim=-2, keepdim=False) # (..., dim, 6, 8) -> (..., dim, 8) x =", "# (B, 2) def regLoss(self): totalRegLoss = torch.zeros(size=(1,), device=self.dev) for name, param in", "= self.forward(m1,m2,a1,a2) loss = lossFunc(preds[:, 1], label.float(), reduction='mean') + self.regLoss() return loss def", "mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__() self.mod = mod self.dev = device self.eps = torch.FloatTensor([1e-8]).to(self.dev)", "activation='PReLu', PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit, self).__init__() self.MLP = MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd)", "self.to(self.dev) def forward(self, m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2 # MLP inputs x = torch.cat((movieSequenceEmbedding,", "self.mod = mod self.dev = device self.eps = torch.FloatTensor([1e-8]).to(self.dev) def forward(self, x, mask):", "preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module): def __init__(self, mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__() self.mod = mod self.dev", "product), dim=2) # (B, SeqLen, 72) x = self.MLP(x) x = self.output(x) #", "name and 'MovieId' in name and 'weight' in name: totalRegLoss += torch.sum(self.l2RegEmbeddding *", "SeqLen, 1) x = torch.cat((x, target, 
product), dim=2) # (B, SeqLen, 72) x", "initStd=0.0001, device=torch.device('cpu')): super(Bottom, self).__init__() self.dev = device self.embeddingGroups = nn.ModuleDict() # embedding group", "self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16) adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6, 8)", "'Genre': self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0) else: self.embeddingGroups[key] = nn.Embedding(value[0], value[1]) self.sequenceMeanPooling =", "SequencePoolingLayer(nn.Module): def __init__(self, mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__() self.mod = mod self.dev = device", "self).__init__() self.dev = device self.embeddingGroups = nn.ModuleDict() # embedding group for key, value", "else: self.embeddingGroups[key] = nn.Embedding(value[0], value[1]) self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling layer", "torch.div(x, length.type(torch.float32) + self.eps) # (..., dim, 8) elif self.mod == 'attention': attentionWeights", "# (B, SeqLen, 6, 8) movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] > 0)", "adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B, 24) movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1)", "movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B, SeqLen, 16) movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :,", "def forward_FR(self, movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out class DIN(nn.Module): def __init__(self,", "predict(self, m1,m2,a1,a2): preds = self.forward(m1,m2,a1,a2)[:, 1] return preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module): def __init__(self, mod='mean',", "m1,m2,a1,a2): preds = self.forward(m1,m2,a1,a2)[:, 1] return 
preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module): def __init__(self, mod='mean', device=torch.device('cpu')):", "= self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16) adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6,", "self).__init__() self.mod = mod self.dev = device self.eps = torch.FloatTensor([1e-8]).to(self.dev) def forward(self, x,", "forward(self, m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2 # MLP inputs x = torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1)", "1])) actiFun = nn.PReLU(1, init=PReLuInit) if activation == 'PReLU' else Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate))", "self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self, x): for layer in self.multiLayerPerceptron: x = layer(x) return", "1] return preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module): def __init__(self, mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__() self.mod =", "def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(DIN,", "= MLP(MLPInfo, activation, PReLuInit, isUseBN, dropoutRate) # MLP self.output = nn.Linear(MLPInfo[-1], 2) #", "= self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] > 0) # (B, SeqLen, 8) #print(movieGenreFeat) #input()", "product = torch.sum(product, dim=-1, keepdim=True) # (B, SeqLen, 1) x = torch.cat((x, target,", "and 'MovieId' in name and 'weight' in name: totalRegLoss += torch.sum(self.l2RegEmbeddding * param*param)", "'embedding' in name and 'MovieId' in name and 'weight' in name: totalRegLoss +=", "torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable", "= movieFeature[ads] movieIdFeat = 
self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B, SeqLen, 16) movieGenreFeat =", "MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001): super(MLP, self).__init__() self.multiLayerPerceptron = nn.ModuleList() # MLP", "self.named_parameters(): if 'embedding' in name and 'MovieId' in name and 'weight' in name:", "== 'Genre': self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0) else: self.embeddingGroups[key] = nn.Embedding(value[0], value[1]) self.sequenceMeanPooling", "super(DIN, self).__init__() self.l2RegEmbeddding = l2RegEmbedding self.dev = device self.MLP = MLP(MLPInfo, activation, PReLuInit,", "torch.unsqueeze(target, dim=1) # (B, 1, 24) target = torch.repeat_interleave(target, x.shape[-2], dim=1) # (B,", "reduction='mean') + self.regLoss() return loss def predict(self, m1,m2,a1,a2): preds = self.forward(m1,m2,a1,a2)[:, 1] return", "= torch.div(x, length.type(torch.float32) + self.eps) # (..., dim, 8) elif self.mod == 'attention':", "layer self.to(self.dev) def forward(self, m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2 # MLP inputs x =", "param*param) return totalRegLoss def loss(self, m1,m2,a1,a2,label, lossFunc): preds = self.forward(m1,m2,a1,a2) loss = lossFunc(preds[:,", "def __init__(self, mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__() self.mod = mod self.dev = device self.eps", "6) -> (...,dim, 1) x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, 6,", "l2RegEmbedding self.dev = device self.MLP = MLP(MLPInfo, activation, PReLuInit, isUseBN, dropoutRate) # MLP", "key, value in embeddingGroupInfo.items(): if key == 'MovieId' or key == 'Genre': self.embeddingGroups[key]", "dim, 8) x = torch.div(x, length.type(torch.float32) + self.eps) # (..., dim, 8) elif", "(B, 8) adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B, 24) movieEmbedding = torch.cat((movieIdFeat,", "layer self.to(self.dev) def 
forward(self, movieIdSequence,ads, movieFeature): movieFeatSequence = movieFeature[movieIdSequence] adsFeat = movieFeature[ads] movieIdFeat", "def forward(self, x): for layer in self.multiLayerPerceptron: x = layer(x) return x class", "torch.sum(product, dim=-1, keepdim=True) # (B, SeqLen, 1) x = torch.cat((x, target, product), dim=2)", "isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1])) actiFun = nn.PReLU(1, init=PReLuInit) if activation == 'PReLU' else", "0]) # (B, SeqLen, 16) movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B, SeqLen,", "torch.mul(x, attentionWeights) # (..., dim, E) x = torch.sum(x, dim=-2, keepdim=False) # (...,", "1:] > 0) # (B, 8) adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B,", "24) # product = torch.sum(product, dim=-1, keepdim=True) # (B, SeqLen, 1) x =", "return x # (B, SeqLen, 1) class Dice(nn.Module): def __init__(self): super(Dice, self).__init__() pass", "return x class AttentionActivationUnit(nn.Module): def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit, self).__init__() self.MLP", "dim=-1, keepdim=True) # product = F.softmax(product, dim=1) return x # (B, SeqLen, 1)", "# product = torch.sum(product, dim=-1, keepdim=True) # (B, SeqLen, 1) x = torch.cat((x,", "movieSequenceEmbedding,adsEmbedding def forward_FR(self, movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out class DIN(nn.Module): def", "out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out class DIN(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True,", "self.MLP = MLP(MLPInfo, activation, PReLuInit, isUseBN, dropoutRate) # MLP self.output = nn.Linear(MLPInfo[-1], 2)", "name and 'weight' in name: totalRegLoss += 
torch.sum(self.l2RegEmbeddding * param*param) return totalRegLoss def", "dim=1) # (B, SeqLen, 24) product = torch.mul(x, target) # (B, SeqLen, 24)", "PReLuInit, isUseBN, dropoutRate) # MLP self.output = nn.Linear(MLPInfo[-1], 2) # output layer self.to(self.dev)", "for key, value in embeddingGroupInfo.items(): if key == 'MovieId' or key == 'Genre':", "loss def predict(self, m1,m2,a1,a2): preds = self.forward(m1,m2,a1,a2)[:, 1] return preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module): def", "keepdim=False) # (..., dim, E) -> (..., E) else: pass return x class", "target, product), dim=2) # (B, SeqLen, 72) x = self.MLP(x) x = self.output(x)", "E) x = torch.mul(x, attentionWeights) # (..., dim, E) x = torch.sum(x, dim=-2,", "self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] > 0) # (B, SeqLen, 8) #print(movieGenreFeat) #input() adsIdFeat", "for layer in self.multiLayerPerceptron: x = layer(x) return x class Bottom(nn.Module): def __init__(self,", "mod self.dev = device self.eps = torch.FloatTensor([1e-8]).to(self.dev) def forward(self, x, mask): if self.mod", "dim, E) x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, E) -> (...,", "dropoutRate=0.0, initStd=initStd) self.output = nn.Linear(attMLPInfo[-1], 1) def forward(self, x, target): target = torch.unsqueeze(target,", "x = torch.div(x, length.type(torch.float32) + self.eps) # (..., dim, 8) elif self.mod ==", "self.regLoss() return loss def predict(self, m1,m2,a1,a2): preds = self.forward(m1,m2,a1,a2)[:, 1] return preds.cpu().detach().numpy() class", "m1,m2,a1,a2,label, lossFunc): preds = self.forward(m1,m2,a1,a2) loss = lossFunc(preds[:, 1], label.float(), reduction='mean') + self.regLoss()", "= torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B, SeqLen, 24) attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) #", "# sequence pooling layer self.to(self.dev) def forward(self, movieIdSequence,ads, movieFeature): movieFeatSequence = 
movieFeature[movieIdSequence] adsFeat", "name: totalRegLoss += torch.sum(self.l2RegEmbeddding * param*param) return totalRegLoss def loss(self, m1,m2,a1,a2,label, lossFunc): preds", "torch.sum(self.l2RegEmbeddding * param*param) return totalRegLoss def loss(self, m1,m2,a1,a2,label, lossFunc): preds = self.forward(m1,m2,a1,a2) loss", "self.mod == 'mean': length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (..., dim, 6) ->", "+ 1])) if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1])) actiFun = nn.PReLU(1, init=PReLuInit) if activation", "dim, 6, 8) -> (..., dim, 8) x = torch.div(x, length.type(torch.float32) + self.eps)", "AttentionActivationUnit(nn.Module): def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit, self).__init__() self.MLP = MLP(attMLPInfo, activation,", "__init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit, self).__init__() self.MLP = MLP(attMLPInfo, activation, PReLuInit, isUseBN=False,", "self.multiLayerPerceptron: x = layer(x) return x class Bottom(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo,", "= device self.embeddingGroups = nn.ModuleDict() # embedding group for key, value in embeddingGroupInfo.items():", "= SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling layer self.to(self.dev) def forward(self, movieIdSequence,ads, movieFeature): movieFeatSequence", "device=torch.device('cpu')): super(Bottom, self).__init__() self.dev = device self.embeddingGroups = nn.ModuleDict() # embedding group for", "dim, 1) -> (.... 
dim, E) x = torch.mul(x, attentionWeights) # (..., dim,", "6, 8) adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0) # (B, 8) adsEmbedding", "keepdim=True) # (B, SeqLen, 1) x = torch.cat((x, target, product), dim=2) # (B,", "embedding group for key, value in embeddingGroupInfo.items(): if key == 'MovieId' or key", "return totalRegLoss def loss(self, m1,m2,a1,a2,label, lossFunc): preds = self.forward(m1,m2,a1,a2) loss = lossFunc(preds[:, 1],", "(B, SeqLen, 24) product = torch.mul(x, target) # (B, SeqLen, 24) # product", "# (B, SeqLen, 16) movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B, SeqLen, 6,", "super(Bottom, self).__init__() self.dev = device self.embeddingGroups = nn.ModuleDict() # embedding group for key,", "movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B, SeqLen, 6, 8) movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat,", "2) def regLoss(self): totalRegLoss = torch.zeros(size=(1,), device=self.dev) for name, param in self.named_parameters(): if", "self.to(self.dev) def forward(self, movieIdSequence,ads, movieFeature): movieFeatSequence = movieFeature[movieIdSequence] adsFeat = movieFeature[ads] movieIdFeat =", "device=self.dev) # sequence pooling layer self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation, PReLuInit, initStd) # attention", "attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (..., dim, 1) -> (.... 
dim, E)", "torch.sum(x, dim=-2, keepdim=False) # (..., dim, E) -> (..., E) else: pass return", "mask): if self.mod == 'mean': length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (..., dim,", "PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001): super(MLP, self).__init__() self.multiLayerPerceptron = nn.ModuleList() # MLP for i", "# (B, 24) return movieSequenceEmbedding,adsEmbedding def forward_FR(self, movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return", "self.embeddingGroups[key] = nn.Embedding(value[0], value[1]) self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling layer self.attentionActivationUnit", "== 'mean': length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (..., dim, 6) -> (...,dim,", "nn.Embedding(value[0], value[1]) self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling layer self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo,", "(..., dim, 8) elif self.mod == 'attention': attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1) #", "1) x = torch.cat((x, target, product), dim=2) # (B, SeqLen, 72) x =", "adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6, 8) adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:]", "+ self.regLoss() return loss def predict(self, m1,m2,a1,a2): preds = self.forward(m1,m2,a1,a2)[:, 1] return preds.cpu().detach().numpy()", "else Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self, x): for layer in self.multiLayerPerceptron: x =", "dim=1) return x # (B, 2) def regLoss(self): totalRegLoss = torch.zeros(size=(1,), device=self.dev) for", "__init__(self, mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__() 
self.mod = mod self.dev = device self.eps =", "1, 24) target = torch.repeat_interleave(target, x.shape[-2], dim=1) # (B, SeqLen, 24) product =", "__init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001): super(MLP, self).__init__() self.multiLayerPerceptron = nn.ModuleList() #", "'mean': length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (..., dim, 6) -> (...,dim, 1)", "1:]) # (B, SeqLen, 6, 8) movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] >", "(B, SeqLen, 24) # product = torch.sum(product, dim=-1, keepdim=True) # (B, SeqLen, 1)", "= torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (..., dim, 6) -> (...,dim, 1) x =", "self.MLP = MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd) self.output = nn.Linear(attMLPInfo[-1], 1) def", "(...,dim, 1) x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, 6, 8) ->", "as nn import torch.nn.functional as F from torch.autograd import Variable class MLP(nn.Module): def", "'MovieId' or key == 'Genre': self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0) else: self.embeddingGroups[key] =", "= device self.MLP = MLP(MLPInfo, activation, PReLuInit, isUseBN, dropoutRate) # MLP self.output =", "x, target): target = torch.unsqueeze(target, dim=1) # (B, 1, 24) target = torch.repeat_interleave(target,", "return preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module): def __init__(self, mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__() self.mod = mod", "SeqLen, 8) #print(movieGenreFeat) #input() adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16) adsGenreFeat =", "(B, SeqLen, 72) x = self.MLP(x) x = self.output(x) # product = torch.sum(product,", "keepdim=True) # product = F.softmax(product, dim=1) return x # (B, SeqLen, 1) class", "self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1])) 
if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1])) actiFun = nn.PReLU(1, init=PReLuInit)", "dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(DIN, self).__init__() self.l2RegEmbeddding = l2RegEmbedding self.dev = device self.MLP =", "(..., dim, E) x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, E) ->", "= torch.sum(product, dim=-1, keepdim=True) # product = F.softmax(product, dim=1) return x # (B,", "class Bottom(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001,", "self.multiLayerPerceptron = nn.ModuleList() # MLP for i in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1]))", "attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen, 1) movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) #", "8) #print(movieGenreFeat) #input() adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16) adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:,", "Bottom(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')):", "movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] > 0) # (B, SeqLen, 8) #print(movieGenreFeat)", "8) x = torch.div(x, length.type(torch.float32) + self.eps) # (..., dim, 8) elif self.mod", "= self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B, SeqLen, 16) movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:])", "self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling layer self.to(self.dev) def forward(self, movieIdSequence,ads, movieFeature):", "= F.softmax(self.output(x), dim=1) return x # (B, 2) def 
regLoss(self): totalRegLoss = torch.zeros(size=(1,),", "movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2 # MLP inputs x = torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1) x = self.MLP(x)", "torch.zeros(size=(1,), device=self.dev) for name, param in self.named_parameters(): if 'embedding' in name and 'MovieId'", "target) # (B, SeqLen, 24) # product = torch.sum(product, dim=-1, keepdim=True) # (B,", "24) product = torch.mul(x, target) # (B, SeqLen, 24) # product = torch.sum(product,", "lossFunc): preds = self.forward(m1,m2,a1,a2) loss = lossFunc(preds[:, 1], label.float(), reduction='mean') + self.regLoss() return", "# MLP for i in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1])) if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i", "# (..., dim, 8) elif self.mod == 'attention': attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1)", "(B, 6, 8) adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0) # (B, 8)", "in name: totalRegLoss += torch.sum(self.l2RegEmbeddding * param*param) return totalRegLoss def loss(self, m1,m2,a1,a2,label, lossFunc):", "adsEmbedding=a1+a2 # MLP inputs x = torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1) x = self.MLP(x) x", "(B, 2) def regLoss(self): totalRegLoss = torch.zeros(size=(1,), device=self.dev) for name, param in self.named_parameters():", "torch.mul(x, target) # (B, SeqLen, 24) # product = torch.sum(product, dim=-1, keepdim=True) #", "(B, 24) movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B, SeqLen, 24) attentionWeights =", "# (..., dim, E) -> (..., E) else: pass return x class AttentionActivationUnit(nn.Module):", "(B, SeqLen, 1) x = torch.cat((x, target, product), dim=2) # (B, SeqLen, 72)", "for i in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1])) if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1]))", "activation='PReLU', 
PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(DIN, self).__init__() self.l2RegEmbeddding = l2RegEmbedding self.dev", "= self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24) return movieSequenceEmbedding,adsEmbedding def forward_FR(self, movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads,", "= self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6, 8) adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] >", "torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable class MLP(nn.Module):", "0) # (B, 8) adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B, 24) movieEmbedding", "key == 'Genre': self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0) else: self.embeddingGroups[key] = nn.Embedding(value[0], value[1])", "in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1])) if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1])) actiFun =", "class AttentionActivationUnit(nn.Module): def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit, self).__init__() self.MLP = MLP(attMLPInfo,", "= torch.sum(x, dim=-2, keepdim=False) # (..., dim, E) -> (..., E) else: pass", "self.dev = device self.eps = torch.FloatTensor([1e-8]).to(self.dev) def forward(self, x, mask): if self.mod ==", "1) movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24) return movieSequenceEmbedding,adsEmbedding def forward_FR(self, movieIdSequence,ads,", "= nn.ModuleDict() # embedding group for key, value in embeddingGroupInfo.items(): if key ==", "totalRegLoss += torch.sum(self.l2RegEmbeddding * param*param) return totalRegLoss def loss(self, m1,m2,a1,a2,label, lossFunc): preds =", "in embeddingGroupInfo.items(): if key == 
'MovieId' or key == 'Genre': self.embeddingGroups[key] = nn.Embedding(value[0],", "output layer self.to(self.dev) def forward(self, m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2 # MLP inputs x", "nn.ModuleList() # MLP for i in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1])) if isUseBN:", "else: pass return x class AttentionActivationUnit(nn.Module): def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit,", "AttentionActivationUnit(attMLPInfo, activation, PReLuInit, initStd) # attention activation unit self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) #", "0) # (B, SeqLen, 8) #print(movieGenreFeat) #input() adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B,", "1:] > 0) # (B, SeqLen, 8) #print(movieGenreFeat) #input() adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0])", "# (B, SeqLen, 1) movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24) return movieSequenceEmbedding,adsEmbedding", "m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2 # MLP inputs x = torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1) x", "torch.repeat_interleave(target, x.shape[-2], dim=1) # (B, SeqLen, 24) product = torch.mul(x, target) # (B,", "and 'weight' in name: totalRegLoss += torch.sum(self.l2RegEmbeddding * param*param) return totalRegLoss def loss(self,", "= self.forward(m1,m2,a1,a2)[:, 1] return preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module): def __init__(self, mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__()", "* param*param) return totalRegLoss def loss(self, m1,m2,a1,a2,label, lossFunc): preds = self.forward(m1,m2,a1,a2) loss =", "unit self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling layer self.to(self.dev) 
def forward(self, movieIdSequence,ads,", "attentionWeights) # (..., dim, E) x = torch.sum(x, dim=-2, keepdim=False) # (..., dim,", "6, 8) -> (..., dim, 8) x = torch.div(x, length.type(torch.float32) + self.eps) #", "8) movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] > 0) # (B, SeqLen, 8)", "self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation, PReLuInit, initStd) # attention activation unit self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention',", "x class Bottom(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0,", "padding_idx=0) else: self.embeddingGroups[key] = nn.Embedding(value[0], value[1]) self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling", "import torch.nn.functional as F from torch.autograd import Variable class MLP(nn.Module): def __init__(self, MLPInfo,", "__init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(DIN, self).__init__()", "torch.sum(product, dim=-1, keepdim=True) # product = F.softmax(product, dim=1) return x # (B, SeqLen,", "super(AttentionActivationUnit, self).__init__() self.MLP = MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd) self.output = nn.Linear(attMLPInfo[-1],", "adsEmbedding), dim=-1) x = self.MLP(x) x = F.softmax(self.output(x), dim=1) return x # (B,", "value in embeddingGroupInfo.items(): if key == 'MovieId' or key == 'Genre': self.embeddingGroups[key] =", "in self.named_parameters(): if 'embedding' in name and 'MovieId' in name and 'weight' in", "self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self, x): for layer in self.multiLayerPerceptron: x = layer(x) return x", "self.dev = device 
self.embeddingGroups = nn.ModuleDict() # embedding group for key, value in", "# product = F.softmax(product, dim=1) return x # (B, SeqLen, 1) class Dice(nn.Module):", "SeqLen, 6, 8) movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] > 0) # (B,", "(B, SeqLen, 6, 8) movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] > 0) #", "self.forward(m1,m2,a1,a2)[:, 1] return preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module): def __init__(self, mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__() self.mod", "initStd=0.0001, device=torch.device('cpu')): super(DIN, self).__init__() self.l2RegEmbeddding = l2RegEmbedding self.dev = device self.MLP = MLP(MLPInfo,", "in name and 'MovieId' in name and 'weight' in name: totalRegLoss += torch.sum(self.l2RegEmbeddding", "dim=-2, keepdim=False) # (..., dim, E) -> (..., E) else: pass return x", "== 'PReLU' else Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self, x): for layer in self.multiLayerPerceptron:", "from torch.autograd import Variable class MLP(nn.Module): def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0,", "SeqLen, 72) x = self.MLP(x) x = self.output(x) # product = torch.sum(product, dim=-1,", "torch.FloatTensor([1e-8]).to(self.dev) def forward(self, x, mask): if self.mod == 'mean': length = torch.sum(mask.type(torch.float32), dim=-1,", "= MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd) self.output = nn.Linear(attMLPInfo[-1], 1) def forward(self,", "movieFeature[ads] movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B, SeqLen, 16) movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:,", "24) attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen, 1) movieSequenceEmbedding = 
self.sequenceAttentionPooling(movieEmbedding, attentionWeights)", "PReLuInit, initStd) # attention activation unit self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling", "movieIdSequence,ads, movieFeature): movieFeatSequence = movieFeature[movieIdSequence] adsFeat = movieFeature[ads] movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0])", "= torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B, 24) movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1) #", "l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(DIN, self).__init__() self.l2RegEmbeddding = l2RegEmbedding self.dev = device self.MLP", "layer(x) return x class Bottom(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True,", "def forward(self, x, target): target = torch.unsqueeze(target, dim=1) # (B, 1, 24) target", "initStd=0.0001): super(MLP, self).__init__() self.multiLayerPerceptron = nn.ModuleList() # MLP for i in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i],", "# (B, 6, 8) adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0) # (B,", "= device self.eps = torch.FloatTensor([1e-8]).to(self.dev) def forward(self, x, mask): if self.mod == 'mean':", "self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0) # (B, 8) adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1)", "= nn.Linear(MLPInfo[-1], 2) # output layer self.to(self.dev) def forward(self, m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2", "activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd) self.output = nn.Linear(attMLPInfo[-1], 1) def forward(self, x, target):", "= torch.unsqueeze(target, dim=1) # (B, 1, 24) target = torch.repeat_interleave(target, x.shape[-2], dim=1) #", "> 0) # (B, SeqLen, 8) #print(movieGenreFeat) #input() adsIdFeat = 
self.embeddingGroups['MovieId'](adsFeat[:, 0]) #", "(B, SeqLen, 1) movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24) return movieSequenceEmbedding,adsEmbedding def", "totalRegLoss def loss(self, m1,m2,a1,a2,label, lossFunc): preds = self.forward(m1,m2,a1,a2) loss = lossFunc(preds[:, 1], label.float(),", "self.MLP(x) x = self.output(x) # product = torch.sum(product, dim=-1, keepdim=True) # product =", "device self.MLP = MLP(MLPInfo, activation, PReLuInit, isUseBN, dropoutRate) # MLP self.output = nn.Linear(MLPInfo[-1],", "initStd=0.0001): super(AttentionActivationUnit, self).__init__() self.MLP = MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd) self.output =", "sequence pooling layer self.to(self.dev) def forward(self, movieIdSequence,ads, movieFeature): movieFeatSequence = movieFeature[movieIdSequence] adsFeat =", "'attention': attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (..., dim, 1) -> (.... 
dim,", "movieGenreFeat), dim=-1) # (B, SeqLen, 24) attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen,", "layer self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation, PReLuInit, initStd) # attention activation unit self.sequenceAttentionPooling =", "product = torch.mul(x, target) # (B, SeqLen, 24) # product = torch.sum(product, dim=-1,", "keepdim=False) # (..., dim, 6, 8) -> (..., dim, 8) x = torch.div(x,", "value[1]) self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling layer self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation,", "2) # output layer self.to(self.dev) def forward(self, m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2 # MLP", "def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(Bottom,", "F.softmax(self.output(x), dim=1) return x # (B, 2) def regLoss(self): totalRegLoss = torch.zeros(size=(1,), device=self.dev)", "8) elif self.mod == 'attention': attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (..., dim,", "dropoutRate=0.0, initStd=0.0001): super(MLP, self).__init__() self.multiLayerPerceptron = nn.ModuleList() # MLP for i in range(len(MLPInfo)-1):", "= nn.ModuleList() # MLP for i in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1])) if", "MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(Bottom, self).__init__() self.dev =", "def regLoss(self): totalRegLoss = torch.zeros(size=(1,), device=self.dev) for name, param in self.named_parameters(): if 'embedding'", "= torch.sum(x, dim=-2, keepdim=False) # (..., dim, 6, 8) -> (..., dim, 8)", "# MLP inputs x = 
torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1) x = self.MLP(x) x =", "dim=1) # (B, 1, 24) target = torch.repeat_interleave(target, x.shape[-2], dim=1) # (B, SeqLen,", "torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (..., dim, 1) -> (.... dim, E) x =", "forward(self, x, mask): if self.mod == 'mean': length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) #", "72) x = self.MLP(x) x = self.output(x) # product = torch.sum(product, dim=-1, keepdim=True)", "return x # (B, 2) def regLoss(self): totalRegLoss = torch.zeros(size=(1,), device=self.dev) for name,", "return out class DIN(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6,", "or key == 'Genre': self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0) else: self.embeddingGroups[key] = nn.Embedding(value[0],", "= torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (..., dim, 1) -> (.... dim, E) x", "adsFeat = movieFeature[ads] movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B, SeqLen, 16) movieGenreFeat", "def forward(self, x, mask): if self.mod == 'mean': length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True)", "(.... 
dim, E) x = torch.mul(x, attentionWeights) # (..., dim, E) x =", "param in self.named_parameters(): if 'embedding' in name and 'MovieId' in name and 'weight'", "activation == 'PReLU' else Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self, x): for layer in", "Variable class MLP(nn.Module): def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001): super(MLP, self).__init__()", "activation, PReLuInit, isUseBN, dropoutRate) # MLP self.output = nn.Linear(MLPInfo[-1], 2) # output layer", "def forward(self, m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2 # MLP inputs x = torch.cat((movieSequenceEmbedding, adsEmbedding),", "= torch.sum(product, dim=-1, keepdim=True) # (B, SeqLen, 1) x = torch.cat((x, target, product),", "pooling layer self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation, PReLuInit, initStd) # attention activation unit self.sequenceAttentionPooling", "totalRegLoss = torch.zeros(size=(1,), device=self.dev) for name, param in self.named_parameters(): if 'embedding' in name", "pass return x class AttentionActivationUnit(nn.Module): def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit, self).__init__()", "(..., dim, 6, 8) -> (..., dim, 8) x = torch.div(x, length.type(torch.float32) +", "dim, E) x = torch.mul(x, attentionWeights) # (..., dim, E) x = torch.sum(x,", "# (..., dim, 6) -> (...,dim, 1) x = torch.sum(x, dim=-2, keepdim=False) #", "isUseBN, dropoutRate) # MLP self.output = nn.Linear(MLPInfo[-1], 2) # output layer self.to(self.dev) def", "in name and 'weight' in name: totalRegLoss += torch.sum(self.l2RegEmbeddding * param*param) return totalRegLoss", "'MovieId' in name and 'weight' in name: totalRegLoss += torch.sum(self.l2RegEmbeddding * param*param) return", "= layer(x) return x class Bottom(nn.Module): def 
__init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25,", "movieFeature): movieFeatSequence = movieFeature[movieIdSequence] adsFeat = movieFeature[ads] movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) #", "MLP self.output = nn.Linear(MLPInfo[-1], 2) # output layer self.to(self.dev) def forward(self, m1,m2,a1,a2): #interactive", "self.embeddingGroups = nn.ModuleDict() # embedding group for key, value in embeddingGroupInfo.items(): if key", "= self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen, 1) movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B,", "'PReLU' else Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self, x): for layer in self.multiLayerPerceptron: x", "-> (...,dim, 1) x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, 6, 8)", "(B, 1, 24) target = torch.repeat_interleave(target, x.shape[-2], dim=1) # (B, SeqLen, 24) product", "device self.embeddingGroups = nn.ModuleDict() # embedding group for key, value in embeddingGroupInfo.items(): if", "x class AttentionActivationUnit(nn.Module): def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit, self).__init__() self.MLP =", "sequence pooling layer self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation, PReLuInit, initStd) # attention activation unit", "value[1], padding_idx=0) else: self.embeddingGroups[key] = nn.Embedding(value[0], value[1]) self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) # sequence", "24) movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B, SeqLen, 24) attentionWeights = self.attentionActivationUnit(movieEmbedding,", "SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling layer self.to(self.dev) def forward(self, movieIdSequence,ads, movieFeature): 
movieFeatSequence =", "def predict(self, m1,m2,a1,a2): preds = self.forward(m1,m2,a1,a2)[:, 1] return preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module): def __init__(self,", "x = F.softmax(self.output(x), dim=1) return x # (B, 2) def regLoss(self): totalRegLoss =", "self.output(x) # product = torch.sum(product, dim=-1, keepdim=True) # product = F.softmax(product, dim=1) return", "self.eps) # (..., dim, 8) elif self.mod == 'attention': attentionWeights = torch.repeat_interleave(mask, x.shape[-1],", "= nn.PReLU(1, init=PReLuInit) if activation == 'PReLU' else Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self,", "== 'attention': attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (..., dim, 1) -> (....", "nn import torch.nn.functional as F from torch.autograd import Variable class MLP(nn.Module): def __init__(self,", "(B, 16) adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6, 8) adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat,", "DIN(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')):", "x # (B, 2) def regLoss(self): totalRegLoss = torch.zeros(size=(1,), device=self.dev) for name, param", "nn.Linear(attMLPInfo[-1], 1) def forward(self, x, target): target = torch.unsqueeze(target, dim=1) # (B, 1,", "SeqLen, 16) movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B, SeqLen, 6, 8) movieGenreFeat", "isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(DIN, self).__init__() self.l2RegEmbeddding = l2RegEmbedding self.dev = device", "dim, 6) -> (...,dim, 1) x = torch.sum(x, dim=-2, keepdim=False) # (..., dim,", "attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, 
initStd=0.0001, device=torch.device('cpu')): super(Bottom, self).__init__() self.dev = device", "super(MLP, self).__init__() self.multiLayerPerceptron = nn.ModuleList() # MLP for i in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i", "dim=-1) # (B, SeqLen, 24) attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen, 1)", "l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(Bottom, self).__init__() self.dev = device self.embeddingGroups = nn.ModuleDict() #", "#input() adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16) adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) #", "loss = lossFunc(preds[:, 1], label.float(), reduction='mean') + self.regLoss() return loss def predict(self, m1,m2,a1,a2):", "forward(self, x, target): target = torch.unsqueeze(target, dim=1) # (B, 1, 24) target =", "= self.MLP(x) x = self.output(x) # product = torch.sum(product, dim=-1, keepdim=True) # product", "MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(DIN, self).__init__() self.l2RegEmbeddding =", "self.output = nn.Linear(MLPInfo[-1], 2) # output layer self.to(self.dev) def forward(self, m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2", "if self.mod == 'mean': length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (..., dim, 6)", "= SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling layer self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation, PReLuInit, initStd)", "= lossFunc(preds[:, 1], label.float(), reduction='mean') + self.regLoss() return loss def predict(self, m1,m2,a1,a2): preds", "movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out class DIN(nn.Module): def 
__init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU',", "dim=-1, keepdim=True) # (B, SeqLen, 1) x = torch.cat((x, target, product), dim=2) #", "MLP(MLPInfo, activation, PReLuInit, isUseBN, dropoutRate) # MLP self.output = nn.Linear(MLPInfo[-1], 2) # output", "self.mod == 'attention': attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (..., dim, 1) ->", "embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(DIN, self).__init__() self.l2RegEmbeddding", "activation unit self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling layer self.to(self.dev) def forward(self,", "# embedding group for key, value in embeddingGroupInfo.items(): if key == 'MovieId' or", "import Variable class MLP(nn.Module): def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001): super(MLP,", "self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24) return movieSequenceEmbedding,adsEmbedding def forward_FR(self, movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature)", "16) adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6, 8) adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:,", "device=torch.device('cpu')): super(DIN, self).__init__() self.l2RegEmbeddding = l2RegEmbedding self.dev = device self.MLP = MLP(MLPInfo, activation,", "torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (..., dim, 6) -> (...,dim, 1) x = torch.sum(x,", "= mod self.dev = device self.eps = torch.FloatTensor([1e-8]).to(self.dev) def forward(self, x, mask): if", "8) -> (..., dim, 8) x = torch.div(x, length.type(torch.float32) + self.eps) # (...,", "dim=-1, keepdim=True) # (..., dim, 6) -> (...,dim, 1) x = torch.sum(x, dim=-2,", "+= 
torch.sum(self.l2RegEmbeddding * param*param) return totalRegLoss def loss(self, m1,m2,a1,a2,label, lossFunc): preds = self.forward(m1,m2,a1,a2)", "movieFeature[movieIdSequence] adsFeat = movieFeature[ads] movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B, SeqLen, 16)", "# (B, SeqLen, 24) product = torch.mul(x, target) # (B, SeqLen, 24) #", "self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen, 1) movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24)", "(B, SeqLen, 16) movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B, SeqLen, 6, 8)", "dim=-1) x = self.MLP(x) x = F.softmax(self.output(x), dim=1) return x # (B, 2)", "self).__init__() self.multiLayerPerceptron = nn.ModuleList() # MLP for i in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i +", "self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B, SeqLen, 6, 8) movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :,", "attMLPInfo, activation='PReLu', PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit, self).__init__() self.MLP = MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0,", "PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd) self.output = nn.Linear(attMLPInfo[-1], 1) def forward(self, x, target): target", "self.embeddingGroups['MovieId'](movieFeatSequence[:, :, 0]) # (B, SeqLen, 16) movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) #", "if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1])) actiFun = nn.PReLU(1, init=PReLuInit) if activation == 'PReLU'", "activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001): super(MLP, self).__init__() self.multiLayerPerceptron = nn.ModuleList() # MLP for", "loss(self, m1,m2,a1,a2,label, lossFunc): preds = self.forward(m1,m2,a1,a2) loss = lossFunc(preds[:, 1], 
label.float(), reduction='mean') +", "# product = torch.sum(product, dim=-1, keepdim=True) # product = F.softmax(product, dim=1) return x", "label.float(), reduction='mean') + self.regLoss() return loss def predict(self, m1,m2,a1,a2): preds = self.forward(m1,m2,a1,a2)[:, 1]", "= nn.Embedding(value[0], value[1]) self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling layer self.attentionActivationUnit =", "6, 8) movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:, :, 1:] > 0) # (B, SeqLen,", "class MLP(nn.Module): def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001): super(MLP, self).__init__() self.multiLayerPerceptron", "device self.eps = torch.FloatTensor([1e-8]).to(self.dev) def forward(self, x, mask): if self.mod == 'mean': length", "PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(DIN, self).__init__() self.l2RegEmbeddding = l2RegEmbedding self.dev =", "1) x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, 6, 8) -> (...,", "range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1])) if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1])) actiFun = nn.PReLU(1,", "attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(DIN, self).__init__() self.l2RegEmbeddding = l2RegEmbedding", "target): target = torch.unsqueeze(target, dim=1) # (B, 1, 24) target = torch.repeat_interleave(target, x.shape[-2],", "import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import", ":, 0]) # (B, SeqLen, 16) movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B,", "# (B, 16) adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6, 8) adsGenreFeat =", "product = 
torch.sum(product, dim=-1, keepdim=True) # product = F.softmax(product, dim=1) return x #", "PReLuInit=0.25, initStd=0.0001): super(AttentionActivationUnit, self).__init__() self.MLP = MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd) self.output", "dim=-1) # (B, 24) movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B, SeqLen, 24)", "24) return movieSequenceEmbedding,adsEmbedding def forward_FR(self, movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out class", "= torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1) x = self.MLP(x) x = F.softmax(self.output(x), dim=1) return x", "x = torch.cat((x, target, product), dim=2) # (B, SeqLen, 72) x = self.MLP(x)", "SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling layer self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation, PReLuInit, initStd) #", "x = self.MLP(x) x = self.output(x) # product = torch.sum(product, dim=-1, keepdim=True) #", "x = self.MLP(x) x = F.softmax(self.output(x), dim=1) return x # (B, 2) def", "inputs x = torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1) x = self.MLP(x) x = F.softmax(self.output(x), dim=1)", "elif self.mod == 'attention': attentionWeights = torch.repeat_interleave(mask, x.shape[-1], dim=-1) # (..., dim, 1)", "pooling layer self.to(self.dev) def forward(self, movieIdSequence,ads, movieFeature): movieFeatSequence = movieFeature[movieIdSequence] adsFeat = movieFeature[ads]", "activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(Bottom, self).__init__() self.dev = device self.embeddingGroups", "> 0) # (B, 8) adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B, 24)", "self.l2RegEmbeddding = l2RegEmbedding self.dev = device self.MLP = MLP(MLPInfo, 
activation, PReLuInit, isUseBN, dropoutRate)", "preds = self.forward(m1,m2,a1,a2) loss = lossFunc(preds[:, 1], label.float(), reduction='mean') + self.regLoss() return loss", "if 'embedding' in name and 'MovieId' in name and 'weight' in name: totalRegLoss", "lossFunc(preds[:, 1], label.float(), reduction='mean') + self.regLoss() return loss def predict(self, m1,m2,a1,a2): preds =", "0]) # (B, 16) adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6, 8) adsGenreFeat", "-> (..., E) else: pass return x class AttentionActivationUnit(nn.Module): def __init__(self, attMLPInfo, activation='PReLu',", "layer in self.multiLayerPerceptron: x = layer(x) return x class Bottom(nn.Module): def __init__(self, embeddingGroupInfo,", "movieSequenceEmbedding = self.sequenceAttentionPooling(movieEmbedding, attentionWeights) # (B, 24) return movieSequenceEmbedding,adsEmbedding def forward_FR(self, movieIdSequence,ads, movieFeature):", "return loss def predict(self, m1,m2,a1,a2): preds = self.forward(m1,m2,a1,a2)[:, 1] return preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module):", "attentionWeights) # (B, 24) return movieSequenceEmbedding,adsEmbedding def forward_FR(self, movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0)", "initStd) # attention activation unit self.sequenceAttentionPooling = SequencePoolingLayer(mod='attention', device=self.dev) # sequence pooling layer", "MLP inputs x = torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1) x = self.MLP(x) x = F.softmax(self.output(x),", "def loss(self, m1,m2,a1,a2,label, lossFunc): preds = self.forward(m1,m2,a1,a2) loss = lossFunc(preds[:, 1], label.float(), reduction='mean')", "E) x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, E) -> (..., E)", "self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1])) actiFun = nn.PReLU(1, init=PReLuInit) if activation == 'PReLU' 
else Dice()", "in self.multiLayerPerceptron: x = layer(x) return x class Bottom(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo,", "# (..., dim, E) x = torch.sum(x, dim=-2, keepdim=False) # (..., dim, E)", "def forward(self, movieIdSequence,ads, movieFeature): movieFeatSequence = movieFeature[movieIdSequence] adsFeat = movieFeature[ads] movieIdFeat = self.embeddingGroups['MovieId'](movieFeatSequence[:,", "8) adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0) # (B, 8) adsEmbedding =", "def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, dropoutRate=0.0, initStd=0.0001): super(MLP, self).__init__() self.multiLayerPerceptron = nn.ModuleList()", "# (B, SeqLen, 24) attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen, 1) movieSequenceEmbedding", "name, param in self.named_parameters(): if 'embedding' in name and 'MovieId' in name and", "(..., dim, 8) x = torch.div(x, length.type(torch.float32) + self.eps) # (..., dim, 8)", "initStd=initStd) self.output = nn.Linear(attMLPInfo[-1], 1) def forward(self, x, target): target = torch.unsqueeze(target, dim=1)", "torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B, 24) movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B,", "regLoss(self): totalRegLoss = torch.zeros(size=(1,), device=self.dev) for name, param in self.named_parameters(): if 'embedding' in", "1) -> (.... 
dim, E) x = torch.mul(x, attentionWeights) # (..., dim, E)", "embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(Bottom, self).__init__() self.dev", "isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(Bottom, self).__init__() self.dev = device self.embeddingGroups = nn.ModuleDict()", "device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__() self.mod = mod self.dev = device self.eps = torch.FloatTensor([1e-8]).to(self.dev) def", "(B, SeqLen, 24) attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding) # (B, SeqLen, 1) movieSequenceEmbedding =", "nn.PReLU(1, init=PReLuInit) if activation == 'PReLU' else Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def forward(self, x):", "16) movieGenreFeat = self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B, SeqLen, 6, 8) movieGenreFeat =", "adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16) adsGenreFeat = self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B,", "E) -> (..., E) else: pass return x class AttentionActivationUnit(nn.Module): def __init__(self, attMLPInfo,", "(..., E) else: pass return x class AttentionActivationUnit(nn.Module): def __init__(self, attMLPInfo, activation='PReLu', PReLuInit=0.25,", "class DIN(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001,", "= self.embeddingGroups['Genre'](movieFeatSequence[:, :, 1:]) # (B, SeqLen, 6, 8) movieGenreFeat = self.sequenceMeanPooling(movieGenreFeat, movieFeatSequence[:,", "self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0) else: self.embeddingGroups[key] = nn.Embedding(value[0], value[1]) self.sequenceMeanPooling = 
SequencePoolingLayer(mod='mean',", "dim=2) # (B, SeqLen, 72) x = self.MLP(x) x = self.output(x) # product", "adsFeat[:, 1:] > 0) # (B, 8) adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1) #", "torch.nn.functional as F from torch.autograd import Variable class MLP(nn.Module): def __init__(self, MLPInfo, activation='PReLU',", "= self.output(x) # product = torch.sum(product, dim=-1, keepdim=True) # product = F.softmax(product, dim=1)", "x): for layer in self.multiLayerPerceptron: x = layer(x) return x class Bottom(nn.Module): def", "# (B, SeqLen, 72) x = self.MLP(x) x = self.output(x) # product =", "device=self.dev) for name, param in self.named_parameters(): if 'embedding' in name and 'MovieId' in", "movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B, SeqLen, 24) attentionWeights = self.attentionActivationUnit(movieEmbedding, adsEmbedding)", "return movieSequenceEmbedding,adsEmbedding def forward_FR(self, movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out class DIN(nn.Module):", "device=self.dev) # sequence pooling layer self.to(self.dev) def forward(self, movieIdSequence,ads, movieFeature): movieFeatSequence = movieFeature[movieIdSequence]", "# (..., dim, 1) -> (.... 
dim, E) x = torch.mul(x, attentionWeights) #", "adsGenreFeat), dim=-1) # (B, 24) movieEmbedding = torch.cat((movieIdFeat, movieGenreFeat), dim=-1) # (B, SeqLen,", "self).__init__() self.MLP = MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd) self.output = nn.Linear(attMLPInfo[-1], 1)", "(..., dim, E) -> (..., E) else: pass return x class AttentionActivationUnit(nn.Module): def", "target = torch.unsqueeze(target, dim=1) # (B, 1, 24) target = torch.repeat_interleave(target, x.shape[-2], dim=1)", "movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out class DIN(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25,", "x.shape[-1], dim=-1) # (..., dim, 1) -> (.... dim, E) x = torch.mul(x,", "self.embeddingGroups['Genre'](adsFeat[:, 1:]) # (B, 6, 8) adsGenreFeat = self.sequenceMeanPooling(adsGenreFeat, adsFeat[:, 1:] > 0)", "self).__init__() self.l2RegEmbeddding = l2RegEmbedding self.dev = device self.MLP = MLP(MLPInfo, activation, PReLuInit, isUseBN,", "movieFeatSequence[:, :, 1:] > 0) # (B, SeqLen, 8) #print(movieGenreFeat) #input() adsIdFeat =", "(B, 24) return movieSequenceEmbedding,adsEmbedding def forward_FR(self, movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out", "1], label.float(), reduction='mean') + self.regLoss() return loss def predict(self, m1,m2,a1,a2): preds = self.forward(m1,m2,a1,a2)[:,", "for name, param in self.named_parameters(): if 'embedding' in name and 'MovieId' in name", "length = torch.sum(mask.type(torch.float32), dim=-1, keepdim=True) # (..., dim, 6) -> (...,dim, 1) x", "preds = self.forward(m1,m2,a1,a2)[:, 1] return preds.cpu().detach().numpy() class SequencePoolingLayer(nn.Module): def __init__(self, mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer,", "# (B, SeqLen, 
8) #print(movieGenreFeat) #input() adsIdFeat = self.embeddingGroups['MovieId'](adsFeat[:, 0]) # (B, 16)", "class SequencePoolingLayer(nn.Module): def __init__(self, mod='mean', device=torch.device('cpu')): super(SequencePoolingLayer, self).__init__() self.mod = mod self.dev =", "out class DIN(nn.Module): def __init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0,", "__init__(self, embeddingGroupInfo, MLPInfo, attMLPInfo, activation='PReLU', PReLuInit=0.25, isUseBN=True, l2RegEmbedding=1e-6, dropoutRate=0.0, initStd=0.0001, device=torch.device('cpu')): super(Bottom, self).__init__()", "self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) # sequence pooling layer self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation, PReLuInit,", "-> (.... dim, E) x = torch.mul(x, attentionWeights) # (..., dim, E) x", "= self.MLP(x) x = F.softmax(self.output(x), dim=1) return x # (B, 2) def regLoss(self):", "if key == 'MovieId' or key == 'Genre': self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0)", "MLP for i in range(len(MLPInfo)-1): self.multiLayerPerceptron.append(nn.Linear(MLPInfo[i], MLPInfo[i + 1])) if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i +", "= torch.mul(x, attentionWeights) # (..., dim, E) x = torch.sum(x, dim=-2, keepdim=False) #", "dim=1) return x # (B, SeqLen, 1) class Dice(nn.Module): def __init__(self): super(Dice, self).__init__()", "key == 'MovieId' or key == 'Genre': self.embeddingGroups[key] = nn.Embedding(value[0], value[1], padding_idx=0) else:", "1])) if isUseBN: self.multiLayerPerceptron.append(nn.BatchNorm1d(MLPInfo[i + 1])) actiFun = nn.PReLU(1, init=PReLuInit) if activation ==", "MLP(attMLPInfo, activation, PReLuInit, isUseBN=False, dropoutRate=0.0, initStd=initStd) self.output = nn.Linear(attMLPInfo[-1], 1) def forward(self, x,", "#interactive movieSequenceEmbedding=m1+m2 
adsEmbedding=a1+a2 # MLP inputs x = torch.cat((movieSequenceEmbedding, adsEmbedding), dim=-1) x =", "nn.Embedding(value[0], value[1], padding_idx=0) else: self.embeddingGroups[key] = nn.Embedding(value[0], value[1]) self.sequenceMeanPooling = SequencePoolingLayer(mod='mean', device=self.dev) #", "as F from torch.autograd import Variable class MLP(nn.Module): def __init__(self, MLPInfo, activation='PReLU', PReLuInit=0.25,", "# (B, 8) adsEmbedding = torch.cat((adsIdFeat, adsGenreFeat), dim=-1) # (B, 24) movieEmbedding =", "-> (..., dim, 8) x = torch.div(x, length.type(torch.float32) + self.eps) # (..., dim,", "# output layer self.to(self.dev) def forward(self, m1,m2,a1,a2): #interactive movieSequenceEmbedding=m1+m2 adsEmbedding=a1+a2 # MLP inputs", "forward_FR(self, movieIdSequence,ads, movieFeature): movieSequenceEmbedding,adsEmbedding=self.forward(movieIdSequence,ads, movieFeature) out=torch.cat((movieSequenceEmbedding,adsEmbedding),dim=0) return out class DIN(nn.Module): def __init__(self, embeddingGroupInfo,", "= torch.mul(x, target) # (B, SeqLen, 24) # product = torch.sum(product, dim=-1, keepdim=True)", "# sequence pooling layer self.attentionActivationUnit = AttentionActivationUnit(attMLPInfo, activation, PReLuInit, initStd) # attention activation", "actiFun = nn.PReLU(1, init=PReLuInit) if activation == 'PReLU' else Dice() self.multiLayerPerceptron.append(actiFun) self.multiLayerPerceptron.append(nn.Dropout(dropoutRate)) def", "# (B, SeqLen, 1) x = torch.cat((x, target, product), dim=2) # (B, SeqLen,", "= torch.zeros(size=(1,), device=self.dev) for name, param in self.named_parameters(): if 'embedding' in name and" ]
[]
[ "{ 'eval': { 'func_2': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1']", "'!eq 1'] } }, 'comp_name2': { 'eval': { 'func_1': {} }, 'expect': {", "'str']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': 'sss'}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field':", "4})) with self.assertRaises(ValueError): # probe statement confliction. p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1', {'int_field': 2}))", "can be # found in the LICENSE file. import re import unittest from", "{'int_field': 1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2', {'int_field':", "cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [False, 'str']})) result", "{ 'int_field': [True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertEqual(cps1, cps2) def", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {}, func_arg={'arg_1': 'aaa'}))", "self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': 'sss'}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': 
[True, 'str', '!eq", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } } } })) def", "[True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashCategoryNamesDiffer(self): cps1", "'') builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3) self.probe_statement_definition = builder.Build() class", "'str_field': 'sss', 'int_field': 3, 'hex_field': '0BAD'}, information={'comp_group': 'other_name'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1',", "[True, 'str', '!eq sss'], 'int_field': [True, 'int', '!eq 3'], 'hex_field': [True, 'hex', '!eq", "'int_field': [True, 'int', '!eq 1'] } } }, 'category2': { 'comp_name1': { 'eval':", "'func_1', {'str_field': [False, 'str']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': 'sss'}) self.assertEqual( result,", "_GenerateExpectResult(self, comp_name, func_name, expect_field, func_arg=None, information=None): statement = { 'eval': { func_name: func_arg", "result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [True, 'int', '!eq 3']})) def testGenerateProbeStatementStrField(self): result = self.probe_statement_definition.GenerateProbeStatement(", "class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def testBuildProbeStatementDefinition(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2',", "} }) def testFromDictMultipleComponents(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval':", "probe_config_types from cros.factory.utils import json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def 
testBuildProbeStatementDefinition(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1',", "this source code is governed by a BSD-style license that can be #", "3, 'hex_field': '0BAD'}, information={'comp_group': 'other_name'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', { 'str_field': [True,", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {})) def testGenerateProbeStatementIntField(self): result", "1'] } }), probe_config_types.ComponentProbeStatement.FromDict({ 'category1': { 'comp1': { 'eval': { 'func_1': {} },", "{ 'func_2': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } })", "} } }, 'category2': { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect':", "'This is field2') builder.AddHexOutputField('field3', 'This is field3') builder.AddIntOutputField('field_only_func1', 'This is field ?', probe_function_names=['func_1'])", "self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval': { 'func_1': {} },", "self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2', {'int_field': 3})) with self.assertRaises(ValueError): # component name confliction p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement(", "'!eq 1'] } } } }) def testFromDictMiscErrors(self): self.assertRaises(ValueError, probe_config_types.ComponentProbeStatement.FromDict, {'category1': 100}) if", "field3') builder.AddIntOutputField('field_only_func1', 'This is field ?', probe_function_names=['func_1']) d = builder.Build() self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()),", "'comp_1', 'func_1', {'str_field_started_with_a': re.compile('x.*')}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', { 
'str_field_started_with_a': [True, 'str', '!re", "self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [False, 'int']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field':", "result, self._GenerateExpectResult('comp_1', 'func_1', { 'str_field_started_with_a': [True, 'str', '!re x.*'] })) def testGenerateProbeStatementHexField(self): result", "?', probe_function_names=['func_1']) d = builder.Build() self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()),", "'sss'}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [True, 'str', '!eq sss']})) result = self.probe_statement_definition.GenerateProbeStatement(", "'eval': { 'func_2': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] }", "cros.factory.probe.runtime_probe import probe_config_types from cros.factory.utils import json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def testBuildProbeStatementDefinition(self): builder =", "cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashFunctionNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1':", "'!eq 0x0AAAA']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }, { 'str_field':", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field_three_digits': [True, 'hex',", "'comp1', { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'}) self.assertEqual( result, 
self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field_three_digits': [True,", "'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashCategoryNamesDiffer(self): cps1 =", "the LICENSE file. import re import unittest from cros.factory.probe.runtime_probe import probe_config_types from cros.factory.utils", "'hex_field': '0AAAA' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) result", "{'int_field': [True, 'int', '!eq 2']} }, 'comp_3': { 'eval': {'func_2': {}}, 'expect': {'int_field':", "'comp_group': 'other_name'})) def testGenerateProbeStatementWithArgument(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result,", "unittest from cros.factory.probe.runtime_probe import probe_config_types from cros.factory.utils import json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def testBuildProbeStatementDefinition(self):", "'func_1', {'str_field': [True, 'str', '!eq sss']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'})", "expression check if the given expected value is also # an regular expression", "} } }) def testFromDictMiscErrors(self): self.assertRaises(ValueError, probe_config_types.ComponentProbeStatement.FromDict, {'category1': 100}) if __name__ == '__main__':", "{'hex_field': '0AAAA'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) with", "'This is field3') builder.AddIntOutputField('field_only_func1', 'This is field ?', probe_function_names=['func_1']) d = builder.Build() self.assertEqual(d.category_name,", "'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }, 'comp_name2':", "'!eq 3']})) def 
testGenerateProbeStatementStrField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': None}) self.assertEqual( result,", "'eval': {'func_2': {}}, 'expect': {'int_field': [True, 'int', '!eq 3']} }, } }) class", "'func_1', {'hex_field': '0AAAA'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']}))", "'int_field': [True, 'int', '!eq 3'], 'hex_field': [True, 'hex', '!eq 0x0BAD']}, information={ 'comp_group': 'other_name'}))", "builder.AddHexOutputField('field3', 'This is field3') builder.AddIntOutputField('field_only_func1', 'This is field ?', probe_function_names=['func_1']) d = builder.Build()", "'func_1', {'str_field_started_with_a': 'b_value'}) # Ignore the regular expression check if the given expected", "} } } }) def testFromDictComponentNameNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 3.1415926:", "self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits': [False, 'hex']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'}) self.assertEqual(", "'sss', 'int_field': 3, 'hex_field': '0BAD'}, information={'comp_group': 'other_name'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {", "3.1415926: { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq", "'int_field': [True, 'int', '!eq 1'] } } } }) def testFromDictMultipleComponents(self): self.assertRaises( ValueError,", "builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3) self.probe_statement_definition = builder.Build() class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase):", "[f.name for f in 
d.probe_functions['func_1'].output_fields], ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual( [f.name for f", "'', num_value_digits=3) self.probe_statement_definition = builder.Build() class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self, comp_name, func_name, expect_field, func_arg=None,", "func 2.') builder.AddIntOutputField('int_field', '') builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits', '',", "'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2']) self.assertCountEqual( [f.name for f in d.probe_functions['func_1'].output_fields], ['field1', 'field2', 'field3',", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', { 'str_field': 'sss', 'int_field': 3, 'hex_field': '0BAD'}, information={'comp_group': 'other_name'})", "cps2) def testHashCategoryNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} },", "testGenerateProbeStatementStrField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field':", "'hex', '!eq 0x0BAD']}, information={ 'comp_group': 'other_name'})) def testGenerateProbeStatementWithArgument(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "'!eq 1'] } } } }) def testFromDictMultipleComponents(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1':", "[True, 'int', '!eq 3'], 'hex_field': [True, 'hex', '!eq 0x0BAD']}, information={ 'comp_group': 'other_name'})) def", "'func_1', {'hex_field_three_digits': 'B3FF'}) def 
testGenerateProbeStatementList(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA'", "'int_field': [True, 'int', '!eq 1'] } }, 'comp_name2': { 'eval': { 'func_1': {}", "testGenerateProbeStatementList(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1',", "{'int_field': 4})) with self.assertRaises(ValueError): # probe statement confliction. p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1', {'int_field':", "d.probe_functions['func2'].output_fields], ['field1', 'field2', 'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This", "self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertEqual(cps1, cps2) def testHashCompNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': {", "'field2', 'field3', 'field_only_func1']) self.assertCountEqual( [f.name for f in d.probe_functions['func2'].output_fields], ['field1', 'field2', 'field3']) class", "sss'] }])) def testGenerateProbeStatementExtraInformation(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', { 'str_field': 'sss', 'int_field':", "confliction. 
p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1', {'int_field': 2})) result = p.DumpToString() self.assertEqual( json_utils.LoadStr(result), {", "self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement(", "def testGenerateProbeStatementIntField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1',", "{ 'str_field': [True, 'str', '!eq sss'], 'int_field': [True, 'int', '!eq 3'], 'hex_field': [True,", "builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is func 2.')", "func_name, expect_field, func_arg=None, information=None): statement = { 'eval': { func_name: func_arg or {}", "'!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashFunctionNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1',", "1'] } } } }) def testFromDictMultipleComponents(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': {", "testFromDictComponentNameNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 3.1415926: { 'eval': { 'func_1': {}", "# found in the LICENSE file. 
import re import unittest from cros.factory.probe.runtime_probe import", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }, { 'str_field': 'sss' }]) self.assertEqual(", "result, self._GenerateExpectResult( 'comp_1', 'func_1', {'str_field_started_with_a': [True, 'str', '!eq a_value']})) with self.assertRaises(ValueError): # format", "} }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertEqual(cps1, cps2) def testHashCompNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {", "} }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashCategoryNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {", "expect_field } if information is not None: statement['information'] = information return probe_config_types.ComponentProbeStatement('category_x', comp_name,", "'func_1', { 'str_field': [True, 'str', '!eq sss'], 'int_field': [True, 'int', '!eq 3'], 'hex_field':", "pattern. 
result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': re.compile('x.*')}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {", "{} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash)", "1'] } } } }) def testFromDictComponentNameNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': {", "'comp_1', 'func_1', {'int_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [False, 'int']})) result =", "{ 'int_field': [True, 'int', '!eq 1'] } } }, 'category2': { 'comp_name1': {", "} } } })) def testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1':", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'}) # Ignore the regular expression check if the", "[True, 'int', '!eq 1'] } } } }) def testFromDictComponentNameNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict,", "'int', '!eq 3']} }, } }) class ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self): cps1 = probe_config_types.ComponentProbeStatement('category1',", "information={'comp_group': 'other_name'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', { 'str_field': [True, 'str', '!eq sss'],", "[{ 'hex_field': '0AAAA' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']}))", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }, { 'str_field': 'sss' }])", "1'] } } }, 'category2': { 'comp_name1': { 'eval': { 'func_1': {} },", "'hex_field': [True, 'hex', '!eq 0x0AAAA'] }, { 
'str_field': [True, 'str', '!eq sss'] }]))", "'expect': {'int_field': [True, 'int', '!eq 2']} }, 'comp_3': { 'eval': {'func_2': {}}, 'expect':", "class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self): p = probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 1}))", "self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [False, 'str']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field':", "[True, 'hex', '!eq 0xB3F']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'}) def testGenerateProbeStatementList(self):", "}) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': {", "ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 123: { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect':", "'hex_field': [True, 'hex', '!eq 0x0BAD']}, information={ 'comp_group': 'other_name'})) def testGenerateProbeStatementWithArgument(self): result = self.probe_statement_definition.GenerateProbeStatement(", "3']} }, } }) class ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {", "sss'], 'int_field': [True, 'int', '!eq 3'], 'hex_field': [True, 'hex', '!eq 0x0BAD']}, information={ 'comp_group':", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': 'sss'}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [True,", "{'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 2']} }, 'comp_3': { 'eval': {'func_2':", 
"testFromDictCategoryNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 123: { 'comp_name1': { 'eval': { 'func_1': {}", "is not None: statement['information'] = information return probe_config_types.ComponentProbeStatement('category_x', comp_name, statement) def testGenerateProbeStatementNoField(self): result", "['field1', 'field2', 'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is", "{'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'}) def", "is governed by a BSD-style license that can be # found in the", "'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2']) self.assertCountEqual( [f.name for f", "'comp_name2': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field': [True,", "})) def testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval': {", "self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2']) self.assertCountEqual( [f.name for f in", "} } } }) def testFromDictMiscErrors(self): self.assertRaises(ValueError, probe_config_types.ComponentProbeStatement.FromDict, {'category1': 100}) if __name__ ==", "}) def 
testFromDictComponentNameNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 3.1415926: { 'eval': {", "[True, 'int', '!eq 3']})) def testGenerateProbeStatementStrField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': None})", "self.assertEqual(cps1, cps2) def testHashCompNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {}", "func_arg or {} }, 'expect': expect_field } if information is not None: statement['information']", "'func2']) self.assertCountEqual( [f.name for f in d.probe_functions['func_1'].output_fields], ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual( [f.name", "= builder.Build() self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2']) self.assertCountEqual(", "return probe_config_types.ComponentProbeStatement('category_x', comp_name, statement) def testGenerateProbeStatementNoField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}) self.assertEqual(result,", "ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This", "file. 
import re import unittest from cros.factory.probe.runtime_probe import probe_config_types from cros.factory.utils import json_utils", "'int', '!eq 1'] } } }, 'category2': { 'comp_name1': { 'eval': { 'func_1':", "json_utils.LoadStr(result), { 'category_x': { 'comp_1': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int',", "self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [True, 'str', '!eq sss']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "'str_field': [True, 'str', '!eq sss'] }])) def testGenerateProbeStatementExtraInformation(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field_three_digits': [True, 'hex', '!eq", "value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3) self.probe_statement_definition = builder.Build() class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self,", "re import unittest from cros.factory.probe.runtime_probe import probe_config_types from cros.factory.utils import json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase):", "self.assertCountEqual( [f.name for f in d.probe_functions['func2'].output_fields], ['field1', 'field2', 'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self):", "def testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': {", "'func_1', {'int_field': 2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2', {'int_field': 3})) with self.assertRaises(ValueError): # component", "probe_function_names=['func_1']) d = 
builder.Build() self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1',", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1',", "'!eq 1'] } } } }) def testFromDictComponentNameNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1':", "'int_field': 3, 'hex_field': '0BAD'}, information={'comp_group': 'other_name'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', { 'str_field':", "'str', '!eq sss'], 'int_field': [True, 'int', '!eq 3'], 'hex_field': [True, 'hex', '!eq 0x0BAD']},", "[False, 'int']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 3}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1',", "ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect':", "1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_2': {} },", "'func_1', {'hex_field': 'xyz'}) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1',", "LICENSE file. 
import re import unittest from cros.factory.probe.runtime_probe import probe_config_types from cros.factory.utils import", "'comp1', { 'eval': { 'func_2': {} }, 'expect': { 'int_field': [True, 'int', '!eq", "} }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval':", "import json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def testBuildProbeStatementDefinition(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func", "} }, 'comp_name2': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True,", "{ 'eval': {'func_2': {}}, 'expect': {'int_field': [True, 'int', '!eq 3']} }, } })", "{ 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } })", "'func_1', { 'str_field': 'sss', 'int_field': 3, 'hex_field': '0BAD'}, information={'comp_group': 'other_name'}) self.assertEqual( result, self._GenerateExpectResult(", "'category1': { 3.1415926: { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True,", "'expect': {'int_field': [True, 'int', '!eq 3']} }, } }) class ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self):", "probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect': {", "['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual( [f.name for f in d.probe_functions['func2'].output_fields], ['field1', 'field2', 'field3'])", "{'hex_field_three_digits': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits': [False, 'hex']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self): cps1 = 
probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} },", "information return probe_config_types.ComponentProbeStatement('category_x', comp_name, statement) def testGenerateProbeStatementNoField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {})", "}, { 'str_field': 'sss' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', [{ 'hex_field': [True, 'hex',", "'!eq 1']} }, 'comp_2': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq", "func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('int_field', '') builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a', '',", "'category1': { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True,", "}) def testFromDictMultipleComponents(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval': {", "[True, 'str', '!re x.*'] })) def testGenerateProbeStatementHexField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field':", "{} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } } } })", "field2') builder.AddHexOutputField('field3', 'This is field3') builder.AddIntOutputField('field_only_func1', 'This is field ?', probe_function_names=['func_1']) d =", "if the given expected value is also # an regular expression pattern. 
result", "cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_2': {} }, 'expect': { 'int_field':", "[True, 'str', '!eq a_value']})) with self.assertRaises(ValueError): # format error self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a':", "'hex', '!eq 0x0AAAA']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }, {", "'expect': { 'int_field': [True, 'int', '!eq 1'] } } }, 'category2': { 'comp_name1':", "self._GenerateExpectResult('comp_1', 'func_1', [{ 'hex_field': [True, 'hex', '!eq 0x0AAAA'] }, { 'str_field': [True, 'str',", "BSD-style license that can be # found in the LICENSE file. import re", "self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {})) def testGenerateProbeStatementIntField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': None})", "information is not None: statement['information'] = information return probe_config_types.ComponentProbeStatement('category_x', comp_name, statement) def testGenerateProbeStatementNoField(self):", "builder.AddIntOutputField('field1', 'This is field1') builder.AddStrOutputField('field2', 'This is field2') builder.AddHexOutputField('field3', 'This is field3') builder.AddIntOutputField('field_only_func1',", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field':", "OS Authors. All rights reserved. 
# Use of this source code is governed", "def testFromDictMultipleComponents(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval': { 'func_1':", "self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [True, 'int', '!eq 3']})) def testGenerateProbeStatementStrField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "'func_1', {}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {})) def testGenerateProbeStatementIntField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashFunctionNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1',", "} }) def testFromDictMiscErrors(self): self.assertRaises(ValueError, probe_config_types.ComponentProbeStatement.FromDict, {'category1': 100}) if __name__ == '__main__': unittest.main()", "p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 2})) p.AddComponentProbeStatement(", "def testFromDictComponentNameNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 3.1415926: { 'eval': { 'func_1':", "self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [True, 'str', '!eq sss']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a':", "{'str_field_started_with_a': [True, 'str', '!eq a_value']})) with self.assertRaises(ValueError): # format error self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "'comp_1', 'func_1', {'hex_field_three_digits': None}) self.assertEqual( 
result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits': [False, 'hex']})) result =", "'func_1', [{ 'hex_field': [True, 'hex', '!eq 0x0AAAA'] }, { 'str_field': [True, 'str', '!eq", "{ 'str_field': [True, 'str', '!eq sss'] }])) def testGenerateProbeStatementExtraInformation(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "of this source code is governed by a BSD-style license that can be", "'int', '!eq 1']} }, 'comp_2': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int',", "the given expected value is also # an regular expression pattern. result =", "'func_1', {'str_field': 'sss'}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [True, 'str', '!eq sss']})) result", "builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('int_field', '') builder.AddStrOutputField('str_field',", "= probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('field1',", "2.') builder.AddIntOutputField('field1', 'This is field1') builder.AddStrOutputField('field2', 'This is field2') builder.AddHexOutputField('field3', 'This is field3')", "'b_value'}) # Ignore the regular expression check if the given expected value is", "be # found in the LICENSE file. import re import unittest from cros.factory.probe.runtime_probe", "that can be # found in the LICENSE file. 
import re import unittest", "self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashCategoryNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': {", "def testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval': { 'func_1':", "'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] }", "p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 4})) with self.assertRaises(ValueError): # probe statement confliction. p.AddComponentProbeStatement(", "testGenerateProbeStatementHexField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': '0AAAA'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1',", "0x0AAAA']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': 'xyz'}) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [True, 'int', '!eq 3']})) def testGenerateProbeStatementStrField(self): result =", "'comp_1', 'func_1', { 'str_field': [True, 'str', '!eq sss'], 'int_field': [True, 'int', '!eq 3'],", "# probe statement confliction. 
p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1', {'int_field': 2})) result = p.DumpToString()", "{ 'comp1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int',", "self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2']) self.assertCountEqual( [f.name for", "is func 2.') builder.AddIntOutputField('int_field', '') builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits',", "'comp_1', 'func_1', {'int_field': 1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3',", "testHashCategoryNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': {", "{'int_field': 3}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [True, 'int', '!eq 3']})) def testGenerateProbeStatementStrField(self):", "'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {}, func_arg={'arg_1': 'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self): p =", "'') builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3) self.probe_statement_definition =", "# Ignore the regular expression check if the given expected value is also", "result, self._GenerateExpectResult( 
'comp_1', 'func_1', {'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {}, func_arg={'arg_1': 'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self): p = probe_config_types.ProbeConfigPayload()", "'comp_1', 'func_1', { 'str_field': 'sss', 'int_field': 3, 'hex_field': '0BAD'}, information={'comp_group': 'other_name'}) self.assertEqual( result,", "{'int_field': 2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2', {'int_field': 3})) with self.assertRaises(ValueError): # component name", "[True, 'int', '!eq 1']} }, 'comp_2': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True,", "'func_1', {'int_field': 1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2',", "'int', '!eq 1'] } } } })) def testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, {", "also # an regular expression pattern. 
result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': re.compile('x.*')})", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': 'xyz'}) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': None}) self.assertEqual(", "{'str_field': 'sss'}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [True, 'str', '!eq sss']})) result =", "3']})) def testGenerateProbeStatementStrField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1',", "[True, 'int', '!eq 2']} }, 'comp_3': { 'eval': {'func_2': {}}, 'expect': {'int_field': [True,", "{'str_field_started_with_a': 'b_value'}) # Ignore the regular expression check if the given expected value", "}, 'category2': { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2', {'int_field': 3})) with self.assertRaises(ValueError):", "} }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashFunctionNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {", "'This is func 2.') builder.AddIntOutputField('field1', 'This is field1') builder.AddStrOutputField('field2', 'This is field2') builder.AddHexOutputField('field3',", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field_three_digits':", "{ 'int_field': [True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, 
cps2.statement_hash) self.assertNotEqual(cps1, cps2) def", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {}, func_arg={'arg_1':", "{'str_field_started_with_a': 'a_value'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'str_field_started_with_a': [True, 'str', '!eq a_value']})) with", "'other_name'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', { 'str_field': [True, 'str', '!eq sss'], 'int_field':", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits': [False, 'hex']}))", "information={ 'comp_group': 'other_name'})) def testGenerateProbeStatementWithArgument(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'})", "name confliction p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 4})) with self.assertRaises(ValueError): # probe statement", "self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', { 'str_field_started_with_a': [True, 'str', '!re x.*'] })) def testGenerateProbeStatementHexField(self):", "for f in d.probe_functions['func2'].output_fields], ['field1', 'field2', 'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self): builder =", "'!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1',", "'str', '!eq sss']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', 
{'str_field_started_with_a': 'a_value'}) self.assertEqual( result, self._GenerateExpectResult(", "'B3F'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']})) with self.assertRaises(ValueError):", "}, 'comp_name2': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int',", "'comp_1', 'func_1', {'str_field_started_with_a': [True, 'str', '!eq a_value']})) with self.assertRaises(ValueError): # format error self.probe_statement_definition.GenerateProbeStatement(", "'func_1', [{ 'hex_field': '0AAAA' }, { 'str_field': 'sss' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1',", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', { 'str_field': 'sss', 'int_field': 3, 'hex_field': '0BAD'}, information={'comp_group':", "'comp_3', 'func_2', {'int_field': 3})) with self.assertRaises(ValueError): # component name confliction p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2',", "with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': 'xyz'}) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits':", "{} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }), probe_config_types.ComponentProbeStatement.FromDict({ 'category1':", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'}) def testGenerateProbeStatementList(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{", "'str', '!eq a_value']})) with self.assertRaises(ValueError): # format error self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'})", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, 
cps2.statement_hash) self.assertNotEqual(cps1,", "[{ 'hex_field': '0AAAA' }, { 'str_field': 'sss' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', [{", "{ 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 2']} }, 'comp_3': {", "setUp(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is func", "{'hex_field': [True, 'hex', '!eq 0x0AAAA']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': 'xyz'}) result", "def testFromDictCategoryNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 123: { 'comp_name1': { 'eval': { 'func_1':", "self._GenerateExpectResult( 'comp_1', 'func_1', { 'str_field': [True, 'str', '!eq sss'], 'int_field': [True, 'int', '!eq", "{'int_field': [True, 'int', '!eq 3']} }, } }) class ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self): cps1", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': re.compile('x.*')}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', { 'str_field_started_with_a':", "#!/usr/bin/env python3 # Copyright 2020 The Chromium OS Authors. All rights reserved. 
#", "self.assertEqual( json_utils.LoadStr(result), { 'category_x': { 'comp_1': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True,", "'comp_1', 'func_1', {'str_field': 'sss'}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [True, 'str', '!eq sss']}))", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': re.compile('x.*')}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', { 'str_field_started_with_a': [True,", "testGenerateProbeStatementExtraInformation(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', { 'str_field': 'sss', 'int_field': 3, 'hex_field': '0BAD'},", "comp_name, func_name, expect_field, func_arg=None, information=None): statement = { 'eval': { func_name: func_arg or", "1']} }, 'comp_2': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 2']}", "def testAll(self): p = probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement(", "'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2)", "'int_field': [True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashCategoryNamesDiffer(self):", "= builder.Build() class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self, comp_name, func_name, expect_field, func_arg=None, information=None): statement =", "'int_field': [True, 'int', '!eq 1'] } } }, }) def testFromDictCategoryNotString(self): self.assertRaises( ValueError,", "'int_field': [True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) 
self.assertEqual(cps1, cps2) def testHashCompNamesDiffer(self):", "builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('field1', 'This is", "self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2']) self.assertCountEqual( [f.name for f in d.probe_functions['func_1'].output_fields], ['field1', 'field2', 'field3', 'field_only_func1'])", "regular expression check if the given expected value is also # an regular", "'int_field': [True, 'int', '!eq 1'] } } } }) def testFromDictComponentNameNotString(self): self.assertRaises( ValueError,", "'comp_1', 'func_1', {'str_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [False, 'str']})) result =", "0x0AAAA'] }, { 'str_field': [True, 'str', '!eq sss'] }])) def testGenerateProbeStatementExtraInformation(self): result =", "'func_1', {})) def testGenerateProbeStatementIntField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': None}) self.assertEqual( result,", "not None: statement['information'] = information return probe_config_types.ComponentProbeStatement('category_x', comp_name, statement) def testGenerateProbeStatementNoField(self): result =", "{}, func_arg={'arg_1': 'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self): p = probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "= p.DumpToString() self.assertEqual( json_utils.LoadStr(result), { 'category_x': { 'comp_1': { 'eval': {'func_1': {}}, 'expect':", "{'str_field_started_with_a': re.compile('x.*')}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', { 'str_field_started_with_a': [True, 'str', '!re x.*'] }))", "'!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def 
testHashCategoryNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1',", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {})) def testGenerateProbeStatementIntField(self): result =", "probe_config_types.ComponentProbeStatement('category_x', comp_name, statement) def testGenerateProbeStatementNoField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}) self.assertEqual(result, self._GenerateExpectResult('comp_1',", "testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {}, func_arg={'arg_1': 'aaa'})) class", "num_value_digits=3) self.probe_statement_definition = builder.Build() class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self, comp_name, func_name, expect_field, func_arg=None, information=None):", "builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('field1', 'This is field1') builder.AddStrOutputField('field2', 'This is field2')", "{'func_2': {}}, 'expect': {'int_field': [True, 'int', '!eq 3']} }, } }) class ComponentProbeStatementTest(unittest.TestCase):", "cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashCategoryNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1':", "'str', '!eq sss'] }])) def testGenerateProbeStatementExtraInformation(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', { 'str_field':", "'func_2', {'int_field': 3})) with self.assertRaises(ValueError): # component name 
confliction p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1',", "'!eq 0x0AAAA'] }, { 'str_field': [True, 'str', '!eq sss'] }])) def testGenerateProbeStatementExtraInformation(self): result", "[f.name for f in d.probe_functions['func2'].output_fields], ['field1', 'field2', 'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self): builder", "check if the given expected value is also # an regular expression pattern.", "'field3', 'field_only_func1']) self.assertCountEqual( [f.name for f in d.probe_functions['func2'].output_fields], ['field1', 'field2', 'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase):", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } } } }) def", "is field3') builder.AddIntOutputField('field_only_func1', 'This is field ?', probe_function_names=['func_1']) d = builder.Build() self.assertEqual(d.category_name, 'category_x')", "'hex']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1',", "{}}, 'expect': {'int_field': [True, 'int', '!eq 3']} }, } }) class ComponentProbeStatementTest(unittest.TestCase): def", "'!eq 1'] } } }, }) def testFromDictCategoryNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 123:", "'a_value'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'str_field_started_with_a': [True, 'str', '!eq a_value']})) with self.assertRaises(ValueError):", "'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {}, func_arg={'arg_1': 'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase):", "'comp_name1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq", "2']} }, 'comp_3': { 
'eval': {'func_2': {}}, 'expect': {'int_field': [True, 'int', '!eq 3']}", "func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('field1', 'This is field1') builder.AddStrOutputField('field2', 'This", "probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 2}))", "} } }, }) def testFromDictCategoryNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 123: { 'comp_name1':", "self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'str_field_started_with_a': [True, 'str', '!eq a_value']})) with self.assertRaises(ValueError): #", "'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.')", "self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 123: { 'comp_name1': { 'eval': { 'func_1': {} },", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }, 'comp_name2': { 'eval':", "ProbeStatementDefinitionBuilderTest(unittest.TestCase): def testBuildProbeStatementDefinition(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This", "{}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {})) def testGenerateProbeStatementIntField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field':", "'!eq 2']} }, 'comp_3': { 'eval': {'func_2': {}}, 'expect': {'int_field': [True, 'int', '!eq", "'!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 
'eval': { 'func_2': {}", "{'int_field': [True, 'int', '!eq 3']})) def testGenerateProbeStatementStrField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field':", "cros.factory.utils import json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def testBuildProbeStatementDefinition(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is", "self.assertRaises(ValueError): # format error self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'}) # Ignore the regular", "3})) with self.assertRaises(ValueError): # component name confliction p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 4}))", "'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } } }", "'int', '!eq 3'], 'hex_field': [True, 'hex', '!eq 0x0BAD']}, information={ 'comp_group': 'other_name'})) def testGenerateProbeStatementWithArgument(self):", "'comp_1', 'func_1', {'hex_field': 'xyz'}) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': None}) self.assertEqual( result,", "{ 'comp_name1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int',", "statement['information'] = information return probe_config_types.ComponentProbeStatement('category_x', comp_name, statement) def testGenerateProbeStatementNoField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } } }, }) def", "{ 'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', {", "license that can be # found in the LICENSE file. 
import re import", "builder.AddStrOutputField('field2', 'This is field2') builder.AddHexOutputField('field3', 'This is field3') builder.AddIntOutputField('field_only_func1', 'This is field ?',", "} }) class ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': {", "}) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertEqual(cps1, cps2) def testHashCompNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval':", "{'str_field': [True, 'str', '!eq sss']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'}) self.assertEqual(", "'eval': { func_name: func_arg or {} }, 'expect': expect_field } if information is", "[False, 'str']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': 'sss'}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1',", "'sss' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', [{ 'hex_field': [True, 'hex', '!eq 0x0AAAA'] },", "}]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', [{ 'hex_field': [True, 'hex', '!eq 0x0AAAA'] }, {", "func_arg={'arg_1': 'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self): p = probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "'field2', 'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2']) self.assertCountEqual( [f.name for f in d.probe_functions['func_1'].output_fields], ['field1',", "[True, 'hex', '!eq 0x0AAAA']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' },", "'This is func 2.') 
builder.AddIntOutputField('int_field', '') builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '')", "'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertEqual(cps1, cps2) def testHashCompNamesDiffer(self): cps1 =", "= probe_config_types.ComponentProbeStatement('category2', 'comp1', { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True,", "'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1':", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': re.compile('x.*')}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', { 'str_field_started_with_a': [True, 'str',", "= probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True,", "'!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', { 'eval': { 'func_1': {}", "1'] } } } })) def testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': {", "probe_config_types.ComponentProbeStatement.FromDict, { 123: { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect': {", "self._GenerateExpectResult('comp_1', 'func_1', {})) def testGenerateProbeStatementIntField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': None}) self.assertEqual(", "'func_1', {'int_field': 2})) result = p.DumpToString() self.assertEqual( json_utils.LoadStr(result), { 'category_x': { 'comp_1': {", "'func_1', {'int_field': [False, 'int']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 3}) self.assertEqual( result,", "or {} }, 'expect': expect_field } if information is not None: 
statement['information'] =", "'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', { 'eval':", "testAll(self): p = probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2',", "[True, 'hex', '!eq 0x0AAAA']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': 'xyz'}) result =", "'!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {}", "'expect': { 'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1',", "result = p.DumpToString() self.assertEqual( json_utils.LoadStr(result), { 'category_x': { 'comp_1': { 'eval': {'func_1': {}},", "{ 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }", "'int', '!eq 1'] } } }, }) def testFromDictCategoryNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, {", "} if information is not None: statement['information'] = information return probe_config_types.ComponentProbeStatement('category_x', comp_name, statement)", "'func_1', {'int_field': 3}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [True, 'int', '!eq 3']})) def", "} }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', { 'eval': { 'func_1': {} }, 'expect':", "def testHashFunctionNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect':", "'comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field':", "'comp_1', 
'func_1', [{ 'hex_field': '0AAAA' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field': [True, 'hex',", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement(", "testGenerateProbeStatementIntField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field':", "{ 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']} }, 'comp_2': {", "} } }) def testFromDictComponentNameNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 3.1415926: {", "'expect': { 'int_field': [True, 'int', '!eq 1'] } } } }) def testFromDictComponentNameNotString(self):", "'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } } },", "'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2']) self.assertCountEqual( [f.name for f in d.probe_functions['func_1'].output_fields], ['field1', 'field2',", "{}}, 'expect': {'int_field': [True, 'int', '!eq 2']} }, 'comp_3': { 'eval': {'func_2': {}},", "re.compile('x.*')}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', { 'str_field_started_with_a': [True, 'str', '!re x.*'] })) def", "[True, 'hex', '!eq 0x0BAD']}, information={ 'comp_group': 'other_name'})) def testGenerateProbeStatementWithArgument(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "'str', '!re x.*'] })) def testGenerateProbeStatementHexField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': '0AAAA'})", "'func_1', { 
'str_field_started_with_a': [True, 'str', '!re x.*'] })) def testGenerateProbeStatementHexField(self): result = self.probe_statement_definition.GenerateProbeStatement(", "{ 'str_field_started_with_a': [True, 'str', '!re x.*'] })) def testGenerateProbeStatementHexField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "cps2) def testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect':", "{ 'int_field': [True, 'int', '!eq 1'] } } } }) def testFromDictComponentNameNotString(self): self.assertRaises(", "component name confliction p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 4})) with self.assertRaises(ValueError): # probe", "probe statement confliction. p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1', {'int_field': 2})) result = p.DumpToString() self.assertEqual(", "probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_2': {} }, 'expect': { 'int_field': [True, 'int',", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': '0AAAA'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field':", "})) def testGenerateProbeStatementHexField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': '0AAAA'}) self.assertEqual( result, self._GenerateExpectResult(", "'comp_4', 'func_1', {'int_field': 2})) result = p.DumpToString() self.assertEqual( json_utils.LoadStr(result), { 'category_x': { 'comp_1':", "{ 'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', {", "[True, 'int', '!eq 1'] } } } }) def testFromDictMiscErrors(self): self.assertRaises(ValueError, probe_config_types.ComponentProbeStatement.FromDict, 
{'category1':", "found in the LICENSE file. import re import unittest from cros.factory.probe.runtime_probe import probe_config_types", "}]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) result = self.probe_statement_definition.GenerateProbeStatement(", "'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1',", "class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self, comp_name, func_name, expect_field, func_arg=None, information=None): statement = { 'eval':", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 3}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [True,", "self.assertCountEqual( [f.name for f in d.probe_functions['func_1'].output_fields], ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual( [f.name for", "1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1', {", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'str_field_started_with_a': [True, 'str',", "1'] } } }, }) def testFromDictCategoryNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 123: {", "result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "{ 'int_field': [True, 'int', '!eq 1'] } } } })) def 
testFromDictValueHashMultipleCategories(self): self.assertRaises(", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits': [False, 'hex']})) result", "'int_field': [True, 'int', '!eq 1'] } } } })) def testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError,", "is func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('field1', 'This is field1') builder.AddStrOutputField('field2',", "{ func_name: func_arg or {} }, 'expect': expect_field } if information is not", "'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }, { 'str_field': 'sss' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1',", "statement confliction. p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1', {'int_field': 2})) result = p.DumpToString() self.assertEqual( json_utils.LoadStr(result),", "{} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } } }, 'category2':", "'func_1', {'str_field_started_with_a': [True, 'str', '!eq a_value']})) with self.assertRaises(ValueError): # format error self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "func_name: func_arg or {} }, 'expect': expect_field } if information is not None:", "[True, 'str', '!eq sss']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'}) self.assertEqual( result,", "{} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } } } }))", "= probe_config_types.ComponentProbeStatement('category1', 'comp2', { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True,", "'int', '!eq 1'] } } } }) def testFromDictMiscErrors(self): self.assertRaises(ValueError, probe_config_types.ComponentProbeStatement.FromDict, {'category1': 100})", "[True, 'int', '!eq 1'] } } } }) def 
testFromDictMultipleComponents(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict,", "x.*'] })) def testGenerateProbeStatementHexField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': '0AAAA'}) self.assertEqual( result,", "= { 'eval': { func_name: func_arg or {} }, 'expect': expect_field } if", "[True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', { 'eval': {", "self.assertNotEqual(cps1, cps2) def testHashFunctionNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {}", "p.DumpToString() self.assertEqual( json_utils.LoadStr(result), { 'category_x': { 'comp_1': { 'eval': {'func_1': {}}, 'expect': {'int_field':", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category2',", "json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def testBuildProbeStatementDefinition(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.')", "[True, 'hex', '!eq 0x0AAAA'] }, { 'str_field': [True, 'str', '!eq sss'] }])) def", "'func_1', {'int_field': 4})) with self.assertRaises(ValueError): # probe statement confliction. 
p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1',", "2})) result = p.DumpToString() self.assertEqual( json_utils.LoadStr(result), { 'category_x': { 'comp_1': { 'eval': {'func_1':", "with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'}) def testGenerateProbeStatementList(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "{}, probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {}, func_arg={'arg_1': 'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self):", "{ 'int_field': [True, 'int', '!eq 1'] } } } }) def testFromDictMiscErrors(self): self.assertRaises(ValueError,", "expect_field, func_arg=None, information=None): statement = { 'eval': { func_name: func_arg or {} },", "'category1': { 'comp1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True,", "if information is not None: statement['information'] = information return probe_config_types.ComponentProbeStatement('category_x', comp_name, statement) def", "cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "'other_name'})) def testGenerateProbeStatementWithArgument(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1',", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'str_field_started_with_a':", "'0AAAA' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) result =", "} } })) def 
testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': {", "self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', [{ 'hex_field': [True, 'hex', '!eq 0x0AAAA'] }, { 'str_field':", "'comp1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq", "result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [False, 'str']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': 'sss'})", "1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('int_field', '') builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*'))", "'int', '!eq 2']} }, 'comp_3': { 'eval': {'func_2': {}}, 'expect': {'int_field': [True, 'int',", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [False, 'int']}))", "}) def testFromDictCategoryNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 123: { 'comp_name1': { 'eval': {", "with self.assertRaises(ValueError): # format error self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'}) # Ignore the", "'expect': { 'int_field': [True, 'int', '!eq 1'] } } }, }) def testFromDictCategoryNotString(self):", "'func_1', {'hex_field_three_digits': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits': [False, 'hex']})) result = self.probe_statement_definition.GenerateProbeStatement(", "{'hex_field_three_digits': 'B3F'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']})) with", "[True, 'int', 
'!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': {", "def testGenerateProbeStatementWithArgument(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1',", "'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2)", "ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self): p = probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 1})) p.AddComponentProbeStatement(", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [False, 'int']})) result", "'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', { 'eval': { 'func_1':", "'!eq 1'] } } }, 'category2': { 'comp_name1': { 'eval': { 'func_1': {}", "'field_only_func1']) self.assertCountEqual( [f.name for f in d.probe_functions['func2'].output_fields], ['field1', 'field2', 'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def", "'category_x': { 'comp_1': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']}", "'!re x.*'] })) def testGenerateProbeStatementHexField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': '0AAAA'}) self.assertEqual(", "comp_name, statement) def testGenerateProbeStatementNoField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1',", "builder.AddIntOutputField('int_field', '') 
builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3) self.probe_statement_definition", "}), probe_config_types.ComponentProbeStatement.FromDict({ 'category1': { 'comp1': { 'eval': { 'func_1': {} }, 'expect': {", "is field2') builder.AddHexOutputField('field3', 'This is field3') builder.AddIntOutputField('field_only_func1', 'This is field ?', probe_function_names=['func_1']) d", "{'int_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [False, 'int']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "'func_1', {}, func_arg={'arg_1': 'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self): p = probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement(", "'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'}) # Ignore the regular expression check if the given", "self.probe_statement_definition = builder.Build() class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self, comp_name, func_name, expect_field, func_arg=None, information=None): statement", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', { 'str_field': 'sss', 'int_field': 3, 'hex_field': '0BAD'}, information={'comp_group': 'other_name'}) self.assertEqual(", "self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True,", "self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', { 'str_field': [True, 'str', '!eq sss'], 'int_field': [True,", "'hex_field': '0BAD'}, information={'comp_group': 'other_name'}) self.assertEqual( result, 
self._GenerateExpectResult( 'comp_1', 'func_1', { 'str_field': [True, 'str',", "[True, 'int', '!eq 1'] } } }, }) def testFromDictCategoryNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict,", "} }) def testFromDictComponentNameNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 3.1415926: { 'eval':", "a_value']})) with self.assertRaises(ValueError): # format error self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'}) # Ignore", "}) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', { 'eval': { 'func_1': {} }, 'expect': {", "'This is func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('int_field', '') builder.AddStrOutputField('str_field', '')", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 3}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [True, 'int', '!eq", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {})) def testGenerateProbeStatementIntField(self): result = self.probe_statement_definition.GenerateProbeStatement(", "}, }) def testFromDictCategoryNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 123: { 'comp_name1': { 'eval':", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [False,", "'hex_field': '0AAAA' }, { 'str_field': 'sss' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', [{ 'hex_field':", "'!eq sss'], 'int_field': [True, 'int', '!eq 3'], 'hex_field': [True, 'hex', '!eq 0x0BAD']}, information={", "= 
self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 3}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [True, 'int',", "} }) cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', { 'eval': { 'func_1': {} }, 'expect':", "1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} },", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': '0AAAA'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field': [True, 'hex',", "def testGenerateProbeStatementNoField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {})) def", "# an regular expression pattern. result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': re.compile('x.*')}) self.assertEqual(", "expression pattern. 
result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': re.compile('x.*')}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1',", "is func 2.') builder.AddIntOutputField('field1', 'This is field1') builder.AddStrOutputField('field2', 'This is field2') builder.AddHexOutputField('field3', 'This", "'expect': expect_field } if information is not None: statement['information'] = information return probe_config_types.ComponentProbeStatement('category_x',", "'!eq 0x0AAAA']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': 'xyz'}) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "'str_field': [True, 'str', '!eq sss'], 'int_field': [True, 'int', '!eq 3'], 'hex_field': [True, 'hex',", "testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval': { 'func_1': {}", "} }, 'category2': { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect': {", "{ 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1']", "{} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }, 'comp_name2': {", "Ignore the regular expression check if the given expected value is also #", "self._GenerateExpectResult('comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{", "}) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': {", "}, } }) class ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval':", "'int_field': [True, 'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def 
testHashFunctionNamesDiffer(self):", "'!eq 3'], 'hex_field': [True, 'hex', '!eq 0x0BAD']}, information={ 'comp_group': 'other_name'})) def testGenerateProbeStatementWithArgument(self): result", "}) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashCategoryNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval':", "'hex', '!eq 0x0AAAA']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': 'xyz'}) result = self.probe_statement_definition.GenerateProbeStatement(", "func 2.') builder.AddIntOutputField('field1', 'This is field1') builder.AddStrOutputField('field2', 'This is field2') builder.AddHexOutputField('field3', 'This is", "'hex', '!eq 0xB3F']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'}) def testGenerateProbeStatementList(self): result", "cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "{ 'str_field': 'sss' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', [{ 'hex_field': [True, 'hex', '!eq", "'!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', { 'eval': { 'func_1': {}", "field ?', probe_function_names=['func_1']) d = builder.Build() self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2', 'field3', 'field_only_func1'])", "def testGenerateProbeStatementStrField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1',", "1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', { 'eval': { 'func_1': {} },", "code is governed by a BSD-style 
license that can be # found in", "is field1') builder.AddStrOutputField('field2', 'This is field2') builder.AddHexOutputField('field3', 'This is field3') builder.AddIntOutputField('field_only_func1', 'This is", "'func_2': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash,", "{ 3.1415926: { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int',", "probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {}, func_arg={'arg_1': 'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self): p", "'expect': { 'int_field': [True, 'int', '!eq 1'] } } } }) def testFromDictMultipleComponents(self):", "[True, 'int', '!eq 1'] } }), probe_config_types.ComponentProbeStatement.FromDict({ 'category1': { 'comp1': { 'eval': {", "'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'}) def testGenerateProbeStatementList(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field':", "'int', '!eq 1'] } }), probe_config_types.ComponentProbeStatement.FromDict({ 'category1': { 'comp1': { 'eval': { 'func_1':", "0x0BAD']}, information={ 'comp_group': 'other_name'})) def testGenerateProbeStatementWithArgument(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}, probe_function_argument={'arg_1':", "}, 'comp_3': { 'eval': {'func_2': {}}, 'expect': {'int_field': [True, 'int', '!eq 3']} },", "self.assertNotEqual(cps1, cps2) def testHashCategoryNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {}", "'This is func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('field1', 'This is field1')", "builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3) self.probe_statement_definition = builder.Build() class 
ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self, comp_name,", "}, 'expect': expect_field } if information is not None: statement['information'] = information return", "'!eq 1'] } }), probe_config_types.ComponentProbeStatement.FromDict({ 'category1': { 'comp1': { 'eval': { 'func_1': {}", "None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits': [False, 'hex']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {}", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': 'sss'}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [True, 'str',", "func_arg=None, information=None): statement = { 'eval': { func_name: func_arg or {} }, 'expect':", "self.assertRaises(ValueError): # component name confliction p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 4})) with self.assertRaises(ValueError):", "All rights reserved. 
# Use of this source code is governed by a", "probe_config_types.ComponentProbeStatement.FromDict({ 'category1': { 'comp1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "field1') builder.AddStrOutputField('field2', 'This is field2') builder.AddHexOutputField('field3', 'This is field3') builder.AddIntOutputField('field_only_func1', 'This is field", "'str_field_started_with_a': [True, 'str', '!re x.*'] })) def testGenerateProbeStatementHexField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "'comp_1', 'func_1', {'hex_field': '0AAAA'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq", "'expect': { 'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2',", "'0AAAA' }, { 'str_field': 'sss' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', [{ 'hex_field': [True,", "builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('int_field', '') builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field',", "class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2',", "'comp2', { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq", "governed by a BSD-style license that can be # found in the LICENSE", "2.') builder.AddIntOutputField('int_field', '') builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3)", "self._GenerateExpectResult('comp_1', 'func_1', {}, 
func_arg={'arg_1': 'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self): p = probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement(", "# component name confliction p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 4})) with self.assertRaises(ValueError): #", "f in d.probe_functions['func_1'].output_fields], ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual( [f.name for f in d.probe_functions['func2'].output_fields],", "{ 'int_field': [True, 'int', '!eq 1'] } }, 'comp_name2': { 'eval': { 'func_1':", "result, self._GenerateExpectResult('comp_1', 'func_1', [{ 'hex_field': [True, 'hex', '!eq 0x0AAAA'] }, { 'str_field': [True,", "reserved. # Use of this source code is governed by a BSD-style license", "'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': 'xyz'})", "'comp_1': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']} }, 'comp_2':", "cps2.statement_hash) self.assertEqual(cps1, cps2) def testHashCompNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1':", "regular expression pattern. 
result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': re.compile('x.*')}) self.assertEqual( result, self._GenerateExpectResult('comp_1',", "None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [False, 'str']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "{'hex_field': 'xyz'}) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1',", "{ 'str_field': 'sss', 'int_field': 3, 'hex_field': '0BAD'}, information={'comp_group': 'other_name'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1',", "'int', '!eq 1'] } }, 'comp_name2': { 'eval': { 'func_1': {} }, 'expect':", "{ 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } },", "ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self, comp_name, func_name, expect_field, func_arg=None, information=None): statement = { 'eval': {", "result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [False, 'int']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 3})", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [False, 'str']}))", "{'hex_field': [True, 'hex', '!eq 0x0AAAA']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA'", "'hex', '!eq 0x0AAAA'] }, { 'str_field': [True, 'str', '!eq sss'] }])) def testGenerateProbeStatementExtraInformation(self):", "2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2', {'int_field': 3})) with self.assertRaises(ValueError): # component 
name confliction", "= probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('int_field',", "'comp_1', 'func_1', {'int_field': 3}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [True, 'int', '!eq 3']}))", "value is also # an regular expression pattern. result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "in the LICENSE file. import re import unittest from cros.factory.probe.runtime_probe import probe_config_types from", "d = builder.Build() self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2'])", "builder.AddIntOutputField('field_only_func1', 'This is field ?', probe_function_names=['func_1']) d = builder.Build() self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1',", "[True, 'str', '!eq sss'] }])) def testGenerateProbeStatementExtraInformation(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {", "'expect': { 'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1',", "from cros.factory.utils import json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def testBuildProbeStatementDefinition(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This", "} }), probe_config_types.ComponentProbeStatement.FromDict({ 'category1': { 'comp1': { 'eval': { 'func_1': {} }, 'expect':", "'field2', 'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self): builder = 
probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func", "'func_1', {'int_field': [True, 'int', '!eq 3']})) def testGenerateProbeStatementStrField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('int_field', '')", "self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1':", "}, { 'str_field': [True, 'str', '!eq sss'] }])) def testGenerateProbeStatementExtraInformation(self): result = self.probe_statement_definition.GenerateProbeStatement(", "['func_1', 'func2']) self.assertCountEqual( [f.name for f in d.probe_functions['func_1'].output_fields], ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual(", "'comp_2', 'func_1', {'int_field': 4})) with self.assertRaises(ValueError): # probe statement confliction. 
p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4',", "'comp_1', 'func_1', {}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {})) def testGenerateProbeStatementIntField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "'int_field': [True, 'int', '!eq 1'] } }), probe_config_types.ComponentProbeStatement.FromDict({ 'category1': { 'comp1': { 'eval':", "{ 'category1': { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }, { 'str_field': 'sss' }]) self.assertEqual( result,", "{'int_field': [True, 'int', '!eq 1']} }, 'comp_2': { 'eval': {'func_1': {}}, 'expect': {'int_field':", "[True, 'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testFromDictSucceed(self): self.assertEqual(", "self._GenerateExpectResult('comp_1', 'func_1', { 'str_field_started_with_a': [True, 'str', '!re x.*'] })) def testGenerateProbeStatementHexField(self): result =", "{'str_field': [False, 'str']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': 'sss'}) self.assertEqual( result, self._GenerateExpectResult('comp_1',", "'This is field1') builder.AddStrOutputField('field2', 'This is field2') builder.AddHexOutputField('field3', 'This is field3') builder.AddIntOutputField('field_only_func1', 'This", "result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "'', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3) self.probe_statement_definition = builder.Build() class 
ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def", "'') builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3) self.probe_statement_definition = builder.Build() class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self, comp_name, func_name,", "is also # an regular expression pattern. result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a':", "result, self._GenerateExpectResult( 'comp_1', 'func_1', { 'str_field': [True, 'str', '!eq sss'], 'int_field': [True, 'int',", "{ 'comp_1': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']} },", "'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash,", "self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [False, 'str']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': 'sss'}) self.assertEqual(", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertEqual(cps1,", "} }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_2': {} }, 'expect':", "[True, 'int', '!eq 1'] } } } })) def testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict,", "} })) def testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval':", "class ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {}", "} } }) def testFromDictMultipleComponents(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 
'comp_name1': {", "cps2) def testHashFunctionNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} },", "source code is governed by a BSD-style license that can be # found", "Copyright 2020 The Chromium OS Authors. All rights reserved. # Use of this", "self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement(", "'func_1', {'hex_field_three_digits': [False, 'hex']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'}) self.assertEqual( result,", "in d.probe_functions['func_1'].output_fields], ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual( [f.name for f in d.probe_functions['func2'].output_fields], ['field1',", "result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [True, 'str', '!eq sss']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "'str_field': 'sss' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', [{ 'hex_field': [True, 'hex', '!eq 0x0AAAA']", "}) cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', { 'eval': { 'func_1': {} }, 'expect': {", "= probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_2': {} }, 'expect': { 'int_field': [True,", "# format error self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'}) # Ignore the regular expression", "'int_field': [True, 'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testFromDictSucceed(self):", "given expected value is also # an regular expression pattern. 
result = self.probe_statement_definition.GenerateProbeStatement(", "'int_field': [True, 'int', '!eq 1'] } } } }) def testFromDictMiscErrors(self): self.assertRaises(ValueError, probe_config_types.ComponentProbeStatement.FromDict,", "1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2', {'int_field': 3}))", "self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': 'xyz'}) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': None})", "builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a', '', value_pattern=re.compile('a.*')) builder.AddHexOutputField('hex_field', '') builder.AddHexOutputField('hex_field_three_digits', '', num_value_digits=3) self.probe_statement_definition = builder.Build()", "ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 3.1415926: { 'eval': { 'func_1': {} }, 'expect':", "'comp_2': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 2']} }, 'comp_3':", "probe_config_types.ComponentProbeStatement('category1', 'comp2', { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int',", "'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 2']} }, 'comp_3': { 'eval':", "'!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertEqual(cps1, cps2) def testHashCompNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1',", "'func_1', {'str_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [False, 'str']})) result = self.probe_statement_definition.GenerateProbeStatement(", "'expect': { 'int_field': [True, 'int', '!eq 1'] } }, 'comp_name2': { 'eval': 
{", "format error self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'}) # Ignore the regular expression check", "'!eq 0xB3F']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'}) def testGenerateProbeStatementList(self): result =", "sss']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1',", "'!eq 0x0BAD']}, information={ 'comp_group': 'other_name'})) def testGenerateProbeStatementWithArgument(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {},", "'!eq sss'] }])) def testGenerateProbeStatementExtraInformation(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', { 'str_field': 'sss',", "[True, 'int', '!eq 3']} }, } }) class ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self): cps1 =", "1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertEqual(cps1, cps2) def testHashCompNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1',", "'func_1', {}, probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {}, func_arg={'arg_1': 'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def", "'!eq sss']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1',", "['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2']) self.assertCountEqual( [f.name for f in d.probe_functions['func_1'].output_fields],", "expected 
value is also # an regular expression pattern. result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']} }, 'comp_2': { 'eval':", "d.probe_functions['func_1'].output_fields], ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual( [f.name for f in d.probe_functions['func2'].output_fields], ['field1', 'field2',", "[False, 'hex']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1',", "the regular expression check if the given expected value is also # an", "def testHashCategoryNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect':", "'This is field ?', probe_function_names=['func_1']) d = builder.Build() self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2',", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1,", "'expect': { 'int_field': [True, 'int', '!eq 1'] } } } }) def testFromDictMiscErrors(self):", "{'str_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [False, 'str']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "'comp_1', 'func_1', {'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits':", "builder.Build() class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self, comp_name, func_name, expect_field, func_arg=None, information=None): statement = {", "builder.AddHexOutputField('hex_field_three_digits', '', 
num_value_digits=3) self.probe_statement_definition = builder.Build() class ProbeStatementDefinitionTest(ConcreteProbeStatementDefinitionTestBase): def _GenerateExpectResult(self, comp_name, func_name, expect_field,", "None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [False, 'int']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 3.1415926: { 'eval': { 'func_1': {} },", "a BSD-style license that can be # found in the LICENSE file. import", "'0AAAA'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) with self.assertRaises(ValueError):", "{ 'category1': { 3.1415926: { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "{ 'int_field': [True, 'int', '!eq 1'] } }), probe_config_types.ComponentProbeStatement.FromDict({ 'category1': { 'comp1': {", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits': [False,", "statement = { 'eval': { func_name: func_arg or {} }, 'expect': expect_field }", "None: statement['information'] = information return probe_config_types.ComponentProbeStatement('category_x', comp_name, statement) def testGenerateProbeStatementNoField(self): result = self.probe_statement_definition.GenerateProbeStatement(", "'expect': { 'int_field': [True, 'int', '!eq 1'] } } } })) def testFromDictValueHashMultipleCategories(self):", "'int', '!eq 1'] } } } }) def testFromDictMultipleComponents(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, {", "information=None): statement = { 'eval': { func_name: func_arg or {} }, 'expect': expect_field", "probe_config_types.ComponentProbeStatement('category1', 'comp1', 
{ 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int',", "'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', { 'eval': { 'func_1':", "def testGenerateProbeStatementHexField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': '0AAAA'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1',", "an regular expression pattern. result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': re.compile('x.*')}) self.assertEqual( result,", "{ 'int_field': [True, 'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def", "1'] } } } }) def testFromDictMiscErrors(self): self.assertRaises(ValueError, probe_config_types.ComponentProbeStatement.FromDict, {'category1': 100}) if __name__", "'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }), probe_config_types.ComponentProbeStatement.FromDict({", "testBuildProbeStatementDefinition(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is func", "[True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', { 'eval': {", "1'] } }, 'comp_name2': { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "} }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect':", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1',", "Authors. All rights reserved. 
# Use of this source code is governed by", "'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'str_field_started_with_a': [True, 'str', '!eq", "confliction p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 4})) with self.assertRaises(ValueError): # probe statement confliction.", "def testIdenticalStatements(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect':", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'a_value'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'str_field_started_with_a': [True,", "The Chromium OS Authors. All rights reserved. # Use of this source code", "Use of this source code is governed by a BSD-style license that can", "'!eq a_value']})) with self.assertRaises(ValueError): # format error self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'}) #", "{'hex_field_three_digits': 'B3FF'}) def testGenerateProbeStatementList(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }])", "{} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash)", "f in d.probe_functions['func2'].output_fields], ['field1', 'field2', 'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x')", "[True, 'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashFunctionNamesDiffer(self): cps1", "with self.assertRaises(ValueError): # component name confliction p.AddComponentProbeStatement( 
self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 4})) with", "'expect': {'int_field': [True, 'int', '!eq 1']} }, 'comp_2': { 'eval': {'func_1': {}}, 'expect':", "import probe_config_types from cros.factory.utils import json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def testBuildProbeStatementDefinition(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x')", "def testGenerateProbeStatementList(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }]) self.assertEqual( result,", "self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "'int', '!eq 1'] } } } }) def testFromDictComponentNameNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, {", "p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2', {'int_field': 3})) with self.assertRaises(ValueError): # component name confliction p.AddComponentProbeStatement(", "result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'str_field': [False,", "'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field':", "p = probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1',", "for f in d.probe_functions['func_1'].output_fields], ['field1', 'field2', 'field3', 'field_only_func1']) 
self.assertCountEqual( [f.name for f in", "'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashFunctionNamesDiffer(self): cps1 =", "self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashFunctionNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': {", "{ 'category_x': { 'comp_1': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq", "}) self.assertNotEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashFunctionNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval':", "self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1', {'int_field': 2})) result = p.DumpToString() self.assertEqual( json_utils.LoadStr(result), { 'category_x': {", "self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 4})) with self.assertRaises(ValueError): # probe statement confliction. 
p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement(", "def testHashCompNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect':", "{'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 1']} }, 'comp_2': { 'eval': {'func_1':", "'expect': { 'int_field': [True, 'int', '!eq 1'] } }), probe_config_types.ComponentProbeStatement.FromDict({ 'category1': { 'comp1':", "= probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 1})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field':", "'func_1', {'str_field_started_with_a': re.compile('x.*')}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', { 'str_field_started_with_a': [True, 'str', '!re x.*']", "self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits': [False, 'hex']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits':", "}) class ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1':", "p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field': 2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2', {'int_field': 3})) with", "rights reserved. 
# Use of this source code is governed by a BSD-style", "{'hex_field_three_digits': [False, 'hex']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'}) self.assertEqual( result, self._GenerateExpectResult(", "'func_1', [{ 'hex_field': '0AAAA' }]) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq", "is func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('int_field', '') builder.AddStrOutputField('str_field', '') builder.AddStrOutputField('str_field_started_with_a',", "= information return probe_config_types.ComponentProbeStatement('category_x', comp_name, statement) def testGenerateProbeStatementNoField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "{} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } } }, })", "{}}, 'expect': {'int_field': [True, 'int', '!eq 1']} }, 'comp_2': { 'eval': {'func_1': {}},", "{ 'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', {", "0x0AAAA']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }, { 'str_field': 'sss'", "1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category2', 'comp1', { 'eval': { 'func_1': {} },", "'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) cps2", "'comp_3': { 'eval': {'func_2': {}}, 'expect': {'int_field': [True, 'int', '!eq 3']} }, }", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }), probe_config_types.ComponentProbeStatement.FromDict({ 'category1': {", "probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('field1', 'This", "{ 'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }),", 
"'aaa'})) class ProbeConfigPayloadTest(ConcreteProbeStatementDefinitionTestBase): def testAll(self): p = probe_config_types.ProbeConfigPayload() p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field':", "{})) def testGenerateProbeStatementIntField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1',", "'int']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 3}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field':", "[True, 'int', '!eq 1'] } }, 'comp_name2': { 'eval': { 'func_1': {} },", "{ 'int_field': [True, 'int', '!eq 1'] } } }, }) def testFromDictCategoryNotString(self): self.assertRaises(", "by a BSD-style license that can be # found in the LICENSE file.", "}, 'expect': { 'int_field': [True, 'int', '!eq 1'] } } }, 'category2': {", "self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'}) def testGenerateProbeStatementList(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1',", "'!eq 3']} }, } }) class ComponentProbeStatementTest(unittest.TestCase): def testIdenticalStatements(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1',", "testHashFunctionNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': {", "{} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) cps2 =", "python3 # Copyright 2020 The Chromium OS Authors. All rights reserved. 
# Use", "'func_1', {'int_field': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [False, 'int']})) result = self.probe_statement_definition.GenerateProbeStatement(", "3}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [True, 'int', '!eq 3']})) def testGenerateProbeStatementStrField(self): result", "'!eq 1'] } } } })) def testFromDictValueHashMultipleCategories(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1':", "cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1', {'int_field': 2})) result = p.DumpToString() self.assertEqual( json_utils.LoadStr(result), { 'category_x':", "} } } }) def testFromDictMultipleComponents(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1':", "self.assertRaises(ValueError): # probe statement confliction. 
p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1', {'int_field': 2})) result =", "import re import unittest from cros.factory.probe.runtime_probe import probe_config_types from cros.factory.utils import json_utils class", "testGenerateProbeStatementWithArgument(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}, probe_function_argument={'arg_1': 'aaa'}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {},", "'B3FF'}) def testGenerateProbeStatementList(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', [{ 'hex_field': '0AAAA' }]) self.assertEqual(", "'func_1', {'str_field_started_with_a': 'a_value'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'str_field_started_with_a': [True, 'str', '!eq a_value']}))", "{'int_field': [False, 'int']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 3}) self.assertEqual( result, self._GenerateExpectResult('comp_1',", "[{ 'hex_field': [True, 'hex', '!eq 0x0AAAA'] }, { 'str_field': [True, 'str', '!eq sss']", "{} }, 'expect': expect_field } if information is not None: statement['information'] = information", "probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 3.1415926: { 'eval': { 'func_1': {} }, 'expect': {", "3'], 'hex_field': [True, 'hex', '!eq 0x0BAD']}, information={ 'comp_group': 'other_name'})) def testGenerateProbeStatementWithArgument(self): result =", "{ 'eval': { func_name: func_arg or {} }, 'expect': expect_field } if information", "testFromDictMultipleComponents(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 'category1': { 'comp_name1': { 'eval': { 'func_1': {}", "0xB3F']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 
'B3FF'}) def testGenerateProbeStatementList(self): result = self.probe_statement_definition.GenerateProbeStatement(", "testIdenticalStatements(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': {", "from cros.factory.probe.runtime_probe import probe_config_types from cros.factory.utils import json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def testBuildProbeStatementDefinition(self): builder", "# Use of this source code is governed by a BSD-style license that", "123: { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True,", "self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field': [True, 'hex', '!eq 0x0AAAA']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1',", "in d.probe_functions['func2'].output_fields], ['field1', 'field2', 'field3']) class ConcreteProbeStatementDefinitionTestBase(unittest.TestCase): def setUp(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1',", "statement) def testGenerateProbeStatementNoField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {}))", "self._GenerateExpectResult('comp_1', 'func_1', {'int_field': [False, 'int']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'int_field': 3}) self.assertEqual(", "Chromium OS Authors. All rights reserved. 
# Use of this source code is", "'int', '!eq 3']})) def testGenerateProbeStatementStrField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field': None}) self.assertEqual(", "'func_1', {'hex_field_three_digits': 'B3F'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']}))", "'func_1', {'hex_field_three_digits': [True, 'hex', '!eq 0xB3F']})) with self.assertRaises(ValueError): self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3FF'})", "}, 'comp_2': { 'eval': {'func_1': {}}, 'expect': {'int_field': [True, 'int', '!eq 2']} },", "def setUp(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is", "self.assertNotEqual(cps1, cps2) def testFromDictSucceed(self): self.assertEqual( probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} },", "[True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertEqual(cps1, cps2) def testHashCompNamesDiffer(self): cps1", "with self.assertRaises(ValueError): # probe statement confliction. 
p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_4', 'func_1', {'int_field': 2})) result", "def _GenerateExpectResult(self, comp_name, func_name, expect_field, func_arg=None, information=None): statement = { 'eval': { func_name:", "} }, }) def testFromDictCategoryNotString(self): self.assertRaises( ValueError, probe_config_types.ComponentProbeStatement.FromDict, { 123: { 'comp_name1': {", "}])) def testGenerateProbeStatementExtraInformation(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', { 'str_field': 'sss', 'int_field': 3,", "{'int_field': 2})) result = p.DumpToString() self.assertEqual( json_utils.LoadStr(result), { 'category_x': { 'comp_1': { 'eval':", "def testGenerateProbeStatementExtraInformation(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', { 'str_field': 'sss', 'int_field': 3, 'hex_field':", "testHashCompNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} }, 'expect': {", "import unittest from cros.factory.probe.runtime_probe import probe_config_types from cros.factory.utils import json_utils class ProbeStatementDefinitionBuilderTest(unittest.TestCase): def", "testGenerateProbeStatementNoField(self): result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {}) self.assertEqual(result, self._GenerateExpectResult('comp_1', 'func_1', {})) def testGenerateProbeStatementIntField(self):", "result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits': [False, 'hex']})) result = self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': 'B3F'})", "is field ?', probe_function_names=['func_1']) d = builder.Build() self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2', 'field3',", "'xyz'}) result = 
self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field_three_digits': None}) self.assertEqual( result, self._GenerateExpectResult('comp_1', 'func_1', {'hex_field_three_digits':", "1.') builder.AddProbeFunction('func2', 'This is func 2.') builder.AddIntOutputField('field1', 'This is field1') builder.AddStrOutputField('field2', 'This is", "'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp2', { 'eval':", "}) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_2': {} }, 'expect': {", "def testBuildProbeStatementDefinition(self): builder = probe_config_types.ProbeStatementDefinitionBuilder('category_x') builder.AddProbeFunction('func_1', 'This is func 1.') builder.AddProbeFunction('func2', 'This is", "[True, 'int', '!eq 1'] } } }, 'category2': { 'comp_name1': { 'eval': {", "'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertEqual(cps1, cps2)", "'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_2':", "{'int_field': 3})) with self.assertRaises(ValueError): # component name confliction p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_2', 'func_1', {'int_field':", "error self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'str_field_started_with_a': 'b_value'}) # Ignore the regular expression check if", "cps2) def testHashCompNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval': { 'func_1': {} },", "'int_field': [True, 'int', '!eq 1'] } }) cps2 = probe_config_types.ComponentProbeStatement('category1', 'comp1', { 'eval':", "self._GenerateExpectResult( 'comp_1', 'func_1', {'str_field_started_with_a': [True, 'str', '!eq a_value']})) with self.assertRaises(ValueError): # format error", "'0BAD'}, 
information={'comp_group': 'other_name'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', { 'str_field': [True, 'str', '!eq", "'comp_2', 'func_1', {'int_field': 2})) p.AddComponentProbeStatement( self.probe_statement_definition.GenerateProbeStatement( 'comp_3', 'func_2', {'int_field': 3})) with self.assertRaises(ValueError): #", "probe_config_types.ComponentProbeStatement('category2', 'comp1', { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True, 'int',", "1'] } }) self.assertEqual(cps1.statement_hash, cps2.statement_hash) self.assertNotEqual(cps1, cps2) def testHashCategoryNamesDiffer(self): cps1 = probe_config_types.ComponentProbeStatement('category1', 'comp1',", "# Copyright 2020 The Chromium OS Authors. All rights reserved. # Use of", "{ 123: { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field':", "builder.Build() self.assertEqual(d.category_name, 'category_x') self.assertCountEqual(list(d.expected_fields.keys()), ['field1', 'field2', 'field3', 'field_only_func1']) self.assertCountEqual(list(d.probe_functions.keys()), ['func_1', 'func2']) self.assertCountEqual( [f.name", "= self.probe_statement_definition.GenerateProbeStatement( 'comp_1', 'func_1', {'hex_field': '0AAAA'}) self.assertEqual( result, self._GenerateExpectResult( 'comp_1', 'func_1', {'hex_field': [True,", "{ 'int_field': [True, 'int', '!eq 1'] } } } }) def testFromDictMultipleComponents(self): self.assertRaises(", "2020 The Chromium OS Authors. All rights reserved. # Use of this source", "'func_1': {} }, 'expect': { 'int_field': [True, 'int', '!eq 1'] } }) self.assertNotEqual(cps1.statement_hash,", "'category2': { 'comp_name1': { 'eval': { 'func_1': {} }, 'expect': { 'int_field': [True," ]
[ "Generated by Django 2.2.7 on 2019-11-19 21:32 from django.db import migrations, models class", "migrations, models class Migration(migrations.Migration): dependencies = [ ('submissions', '0006_merge_20191113_0542'), ] operations = [", "verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ], ), migrations.AddField( model_name='submission', name='tags', field=models.ManyToManyField(to='submissions.SubmissionTag', verbose_name='tags'), ), ]", "= [ ('submissions', '0006_merge_20191113_0542'), ] operations = [ migrations.CreateModel( name='SubmissionTag', fields=[ ('id', models.AutoField(auto_created=True,", "'0006_merge_20191113_0542'), ] operations = [ migrations.CreateModel( name='SubmissionTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ], ), migrations.AddField( model_name='submission', name='tags', field=models.ManyToManyField(to='submissions.SubmissionTag', verbose_name='tags'), ),", "# Generated by Django 2.2.7 on 2019-11-19 21:32 from django.db import migrations, models", "Migration(migrations.Migration): dependencies = [ ('submissions', '0006_merge_20191113_0542'), ] operations = [ migrations.CreateModel( name='SubmissionTag', fields=[", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ], ), migrations.AddField( model_name='submission', name='tags', field=models.ManyToManyField(to='submissions.SubmissionTag',", "migrations.CreateModel( name='SubmissionTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ], ),", "2019-11-19 21:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('submissions',", "django.db 
import migrations, models class Migration(migrations.Migration): dependencies = [ ('submissions', '0006_merge_20191113_0542'), ] operations", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ], ), migrations.AddField( model_name='submission',", "2.2.7 on 2019-11-19 21:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "operations = [ migrations.CreateModel( name='SubmissionTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100,", "= [ migrations.CreateModel( name='SubmissionTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)),", "[ ('submissions', '0006_merge_20191113_0542'), ] operations = [ migrations.CreateModel( name='SubmissionTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "[ migrations.CreateModel( name='SubmissionTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ],", "on 2019-11-19 21:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "by Django 2.2.7 on 2019-11-19 21:32 from django.db import migrations, models class Migration(migrations.Migration):", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('submissions', '0006_merge_20191113_0542'), ] operations =", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ], ), migrations.AddField( model_name='submission', name='tags',", "primary_key=True, serialize=False, verbose_name='ID')), ('name', 
models.CharField(max_length=100, unique=True)), ], ), migrations.AddField( model_name='submission', name='tags', field=models.ManyToManyField(to='submissions.SubmissionTag', verbose_name='tags'),", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('submissions', '0006_merge_20191113_0542'), ]", "] operations = [ migrations.CreateModel( name='SubmissionTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "dependencies = [ ('submissions', '0006_merge_20191113_0542'), ] operations = [ migrations.CreateModel( name='SubmissionTag', fields=[ ('id',", "class Migration(migrations.Migration): dependencies = [ ('submissions', '0006_merge_20191113_0542'), ] operations = [ migrations.CreateModel( name='SubmissionTag',", "('submissions', '0006_merge_20191113_0542'), ] operations = [ migrations.CreateModel( name='SubmissionTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "Django 2.2.7 on 2019-11-19 21:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "21:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('submissions', '0006_merge_20191113_0542'),", "models class Migration(migrations.Migration): dependencies = [ ('submissions', '0006_merge_20191113_0542'), ] operations = [ migrations.CreateModel(", "name='SubmissionTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ], ), migrations.AddField(" ]
[ "sinc definition f0 = 100 Ns = 2000 Tp = 20.0 / Ns", "t2 = np.linspace(-10, 10, Ns * 2) y1 = sinc1(t / Tp) x", "plt.legend(['original', 'sinc interpolated']) plt.title('sinc(t/Tp), ' + \"Tp=\" + str(Tp)) plt.xlabel('Time/s') plt.ylabel('Amplitude') plt.grid() plt.show()", "= np.linspace(-10, 10, Ns) t2 = np.linspace(-10, 10, Ns * 2) y1 =", "t: y.append(np.sum(x * sinc1((tt - ns * T) / T))) return np.array(y) #", "np.sin(2 * PI * f0 * t) print(x.shape) y = sinc_interpolation(x, t2, Tp)", "plt PI = np.pi # =========================define sinc # ---------------normalized def sinc1(x): PI =", "x = np.array(x) y = np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI *", "plt.figure() plt.subplot(131) plt.plot(t, x, '^b') plt.plot(t2, y, '+r') plt.legend(['original', 'sinc interpolated']) plt.title('sinc(t/Tp), '", "= 100 Ns = 2000 Tp = 20.0 / Ns t = np.linspace(-10,", "sinc1(t / Tp) x = np.sin(2 * PI * f0 * t) print(x.shape)", "PI * f0 * t) print(x.shape) y = sinc_interpolation(x, t2, Tp) print(y.shape, \"===\")", "return y def sinc_interpolation(x, t, T): ns = np.arange(x.size) print(ns, \"============\") y =", "/ T))) return np.array(y) # =========================test sinc definition f0 = 100 Ns =", "y = [] for tt in t: y.append(np.sum(x * sinc1((tt - ns *", "= 2000 Tp = 20.0 / Ns t = np.linspace(-10, 10, Ns) t2", "np.linspace(-10, 10, Ns * 2) y1 = sinc1(t / Tp) x = np.sin(2", "def sinc_interpolation(x, t, T): ns = np.arange(x.size) print(ns, \"============\") y = [] for", "x = np.sin(2 * PI * f0 * t) print(x.shape) y = sinc_interpolation(x,", "# ---------------normalized def sinc1(x): PI = np.pi x = np.array(x) y = np.where(np.abs(PI", "np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI * x) / (PI * x))", "- ns * T) / T))) return np.array(y) # =========================test sinc definition f0", "matplotlib.pyplot as plt PI = np.pi # =========================define sinc # ---------------normalized def sinc1(x):", "plt.subplot(131) plt.plot(t, x, '^b') plt.plot(t2, y, '+r') 
plt.legend(['original', 'sinc interpolated']) plt.title('sinc(t/Tp), ' +", "* x)) return y def sinc_interpolation(x, t, T): ns = np.arange(x.size) print(ns, \"============\")", "y, '+r') plt.legend(['original', 'sinc interpolated']) plt.title('sinc(t/Tp), ' + \"Tp=\" + str(Tp)) plt.xlabel('Time/s') plt.ylabel('Amplitude')", "x) / (PI * x)) return y def sinc_interpolation(x, t, T): ns =", "# =========================test sinc definition f0 = 100 Ns = 2000 Tp = 20.0", "np.fft.fftshift(np.fft.fft(y)) plt.figure() plt.subplot(131) plt.plot(t, x, '^b') plt.plot(t2, y, '+r') plt.legend(['original', 'sinc interpolated']) plt.title('sinc(t/Tp),", "PI = np.pi x = np.array(x) y = np.where(np.abs(PI * x) < 1e-38,", "2) y1 = sinc1(t / Tp) x = np.sin(2 * PI * f0", "---------------normalized def sinc1(x): PI = np.pi x = np.array(x) y = np.where(np.abs(PI *", "x, '^b') plt.plot(t2, y, '+r') plt.legend(['original', 'sinc interpolated']) plt.title('sinc(t/Tp), ' + \"Tp=\" +", "sinc # ---------------normalized def sinc1(x): PI = np.pi x = np.array(x) y =", "/ Tp) x = np.sin(2 * PI * f0 * t) print(x.shape) y", "= np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI * x) / (PI *", "/ Ns t = np.linspace(-10, 10, Ns) t2 = np.linspace(-10, 10, Ns *", "* 2) y1 = sinc1(t / Tp) x = np.sin(2 * PI *", "sinc_interpolation(x, t2, Tp) print(y.shape, \"===\") yfft = np.fft.fftshift(np.fft.fft(y)) plt.figure() plt.subplot(131) plt.plot(t, x, '^b')", "* x) < 1e-38, 1.0, np.sin(PI * x) / (PI * x)) return", "np.arange(x.size) print(ns, \"============\") y = [] for tt in t: y.append(np.sum(x * sinc1((tt", "sinc1((tt - ns * T) / T))) return np.array(y) # =========================test sinc definition", "10, Ns) t2 = np.linspace(-10, 10, Ns * 2) y1 = sinc1(t /", "\"============\") y = [] for tt in t: y.append(np.sum(x * sinc1((tt - ns", "Ns * 2) y1 = sinc1(t / Tp) x = np.sin(2 * PI", "ns = np.arange(x.size) print(ns, \"============\") y = [] for tt in t: y.append(np.sum(x", "print(y.shape, \"===\") yfft = 
np.fft.fftshift(np.fft.fft(y)) plt.figure() plt.subplot(131) plt.plot(t, x, '^b') plt.plot(t2, y, '+r')", "y1 = sinc1(t / Tp) x = np.sin(2 * PI * f0 *", "import numpy as np import matplotlib.pyplot as plt PI = np.pi # =========================define", "tt in t: y.append(np.sum(x * sinc1((tt - ns * T) / T))) return", "1.0, np.sin(PI * x) / (PI * x)) return y def sinc_interpolation(x, t,", "T))) return np.array(y) # =========================test sinc definition f0 = 100 Ns = 2000", "1e-38, 1.0, np.sin(PI * x) / (PI * x)) return y def sinc_interpolation(x,", "t = np.linspace(-10, 10, Ns) t2 = np.linspace(-10, 10, Ns * 2) y1", "10, Ns * 2) y1 = sinc1(t / Tp) x = np.sin(2 *", "= sinc_interpolation(x, t2, Tp) print(y.shape, \"===\") yfft = np.fft.fftshift(np.fft.fft(y)) plt.figure() plt.subplot(131) plt.plot(t, x,", "numpy as np import matplotlib.pyplot as plt PI = np.pi # =========================define sinc", "yfft = np.fft.fftshift(np.fft.fft(y)) plt.figure() plt.subplot(131) plt.plot(t, x, '^b') plt.plot(t2, y, '+r') plt.legend(['original', 'sinc", "\"===\") yfft = np.fft.fftshift(np.fft.fft(y)) plt.figure() plt.subplot(131) plt.plot(t, x, '^b') plt.plot(t2, y, '+r') plt.legend(['original',", "x)) return y def sinc_interpolation(x, t, T): ns = np.arange(x.size) print(ns, \"============\") y", "def sinc1(x): PI = np.pi x = np.array(x) y = np.where(np.abs(PI * x)", "Tp) x = np.sin(2 * PI * f0 * t) print(x.shape) y =", "np.sin(PI * x) / (PI * x)) return y def sinc_interpolation(x, t, T):", "'^b') plt.plot(t2, y, '+r') plt.legend(['original', 'sinc interpolated']) plt.title('sinc(t/Tp), ' + \"Tp=\" + str(Tp))", "* T) / T))) return np.array(y) # =========================test sinc definition f0 = 100", "= np.arange(x.size) print(ns, \"============\") y = [] for tt in t: y.append(np.sum(x *", "f0 = 100 Ns = 2000 Tp = 20.0 / Ns t =", "# =========================define sinc # ---------------normalized def sinc1(x): PI = np.pi x = np.array(x)", "x) < 1e-38, 1.0, np.sin(PI * x) 
/ (PI * x)) return y", "np.pi # =========================define sinc # ---------------normalized def sinc1(x): PI = np.pi x =", "2000 Tp = 20.0 / Ns t = np.linspace(-10, 10, Ns) t2 =", "as plt PI = np.pi # =========================define sinc # ---------------normalized def sinc1(x): PI", "in t: y.append(np.sum(x * sinc1((tt - ns * T) / T))) return np.array(y)", "* t) print(x.shape) y = sinc_interpolation(x, t2, Tp) print(y.shape, \"===\") yfft = np.fft.fftshift(np.fft.fft(y))", "[] for tt in t: y.append(np.sum(x * sinc1((tt - ns * T) /", "for tt in t: y.append(np.sum(x * sinc1((tt - ns * T) / T)))", "return np.array(y) # =========================test sinc definition f0 = 100 Ns = 2000 Tp", "= np.array(x) y = np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI * x)", "y = np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI * x) / (PI", "y def sinc_interpolation(x, t, T): ns = np.arange(x.size) print(ns, \"============\") y = []", "T): ns = np.arange(x.size) print(ns, \"============\") y = [] for tt in t:", "import matplotlib.pyplot as plt PI = np.pi # =========================define sinc # ---------------normalized def", "np import matplotlib.pyplot as plt PI = np.pi # =========================define sinc # ---------------normalized", "plt.plot(t, x, '^b') plt.plot(t2, y, '+r') plt.legend(['original', 'sinc interpolated']) plt.title('sinc(t/Tp), ' + \"Tp=\"", "ns * T) / T))) return np.array(y) # =========================test sinc definition f0 =", "np.array(x) y = np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI * x) /", "np.pi x = np.array(x) y = np.where(np.abs(PI * x) < 1e-38, 1.0, np.sin(PI", "T) / T))) return np.array(y) # =========================test sinc definition f0 = 100 Ns", "20.0 / Ns t = np.linspace(-10, 10, Ns) t2 = np.linspace(-10, 10, Ns", "y.append(np.sum(x * sinc1((tt - ns * T) / T))) return np.array(y) # =========================test", "Ns t = np.linspace(-10, 10, Ns) t2 = np.linspace(-10, 10, Ns * 2)", "* x) / (PI * x)) return y def 
sinc_interpolation(x, t, T): ns", "=========================test sinc definition f0 = 100 Ns = 2000 Tp = 20.0 /", "= [] for tt in t: y.append(np.sum(x * sinc1((tt - ns * T)", "t) print(x.shape) y = sinc_interpolation(x, t2, Tp) print(y.shape, \"===\") yfft = np.fft.fftshift(np.fft.fft(y)) plt.figure()", "* sinc1((tt - ns * T) / T))) return np.array(y) # =========================test sinc", "y = sinc_interpolation(x, t2, Tp) print(y.shape, \"===\") yfft = np.fft.fftshift(np.fft.fft(y)) plt.figure() plt.subplot(131) plt.plot(t,", "= sinc1(t / Tp) x = np.sin(2 * PI * f0 * t)", "= np.fft.fftshift(np.fft.fft(y)) plt.figure() plt.subplot(131) plt.plot(t, x, '^b') plt.plot(t2, y, '+r') plt.legend(['original', 'sinc interpolated'])", "= np.linspace(-10, 10, Ns * 2) y1 = sinc1(t / Tp) x =", "print(x.shape) y = sinc_interpolation(x, t2, Tp) print(y.shape, \"===\") yfft = np.fft.fftshift(np.fft.fft(y)) plt.figure() plt.subplot(131)", "< 1e-38, 1.0, np.sin(PI * x) / (PI * x)) return y def", "sinc_interpolation(x, t, T): ns = np.arange(x.size) print(ns, \"============\") y = [] for tt", "np.array(y) # =========================test sinc definition f0 = 100 Ns = 2000 Tp =", "Ns = 2000 Tp = 20.0 / Ns t = np.linspace(-10, 10, Ns)", "'+r') plt.legend(['original', 'sinc interpolated']) plt.title('sinc(t/Tp), ' + \"Tp=\" + str(Tp)) plt.xlabel('Time/s') plt.ylabel('Amplitude') plt.grid()", "(PI * x)) return y def sinc_interpolation(x, t, T): ns = np.arange(x.size) print(ns,", "Tp) print(y.shape, \"===\") yfft = np.fft.fftshift(np.fft.fft(y)) plt.figure() plt.subplot(131) plt.plot(t, x, '^b') plt.plot(t2, y,", "f0 * t) print(x.shape) y = sinc_interpolation(x, t2, Tp) print(y.shape, \"===\") yfft =", "= 20.0 / Ns t = np.linspace(-10, 10, Ns) t2 = np.linspace(-10, 10,", "= np.sin(2 * PI * f0 * t) print(x.shape) y = sinc_interpolation(x, t2,", "= np.pi x = np.array(x) y = np.where(np.abs(PI * x) < 1e-38, 1.0,", "t2, Tp) print(y.shape, \"===\") yfft = np.fft.fftshift(np.fft.fft(y)) 
plt.figure() plt.subplot(131) plt.plot(t, x, '^b') plt.plot(t2,", "t, T): ns = np.arange(x.size) print(ns, \"============\") y = [] for tt in", "plt.plot(t2, y, '+r') plt.legend(['original', 'sinc interpolated']) plt.title('sinc(t/Tp), ' + \"Tp=\" + str(Tp)) plt.xlabel('Time/s')", "= np.pi # =========================define sinc # ---------------normalized def sinc1(x): PI = np.pi x", "np.linspace(-10, 10, Ns) t2 = np.linspace(-10, 10, Ns * 2) y1 = sinc1(t", "=========================define sinc # ---------------normalized def sinc1(x): PI = np.pi x = np.array(x) y", "Ns) t2 = np.linspace(-10, 10, Ns * 2) y1 = sinc1(t / Tp)", "* f0 * t) print(x.shape) y = sinc_interpolation(x, t2, Tp) print(y.shape, \"===\") yfft", "/ (PI * x)) return y def sinc_interpolation(x, t, T): ns = np.arange(x.size)", "as np import matplotlib.pyplot as plt PI = np.pi # =========================define sinc #", "100 Ns = 2000 Tp = 20.0 / Ns t = np.linspace(-10, 10,", "print(ns, \"============\") y = [] for tt in t: y.append(np.sum(x * sinc1((tt -", "Tp = 20.0 / Ns t = np.linspace(-10, 10, Ns) t2 = np.linspace(-10,", "sinc1(x): PI = np.pi x = np.array(x) y = np.where(np.abs(PI * x) <", "PI = np.pi # =========================define sinc # ---------------normalized def sinc1(x): PI = np.pi", "* PI * f0 * t) print(x.shape) y = sinc_interpolation(x, t2, Tp) print(y.shape,", "definition f0 = 100 Ns = 2000 Tp = 20.0 / Ns t" ]
[ "workflow_dict): \"\"\"Imports a new workflow given a dictionary representing a previously exported workflow.", "@custom_exception @dict_output def cli(ctx, workflow_dict): \"\"\"Imports a new workflow given a dictionary representing", "@click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str) @pass_context @custom_exception @dict_output def cli(ctx, workflow_dict): \"\"\"Imports a new workflow", "custom_exception, dict_output, _arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str) @pass_context @custom_exception @dict_output def cli(ctx, workflow_dict): \"\"\"Imports", "def cli(ctx, workflow_dict): \"\"\"Imports a new workflow given a dictionary representing a previously", "import pass_context, json_loads from parsec.decorators import custom_exception, dict_output, _arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str) @pass_context", "type=str) @pass_context @custom_exception @dict_output def cli(ctx, workflow_dict): \"\"\"Imports a new workflow given a", "import click from parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, dict_output, _arg_split", "dict_output, _arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str) @pass_context @custom_exception @dict_output def cli(ctx, workflow_dict): \"\"\"Imports a", "click from parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, dict_output, _arg_split @click.command('import_workflow_dict')", "@pass_context @custom_exception @dict_output def cli(ctx, workflow_dict): \"\"\"Imports a new workflow given a dictionary", "_arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str) @pass_context @custom_exception @dict_output def cli(ctx, workflow_dict): \"\"\"Imports a new", "a new workflow given a dictionary representing a previously exported workflow. 
Output: \"\"\"", "<gh_stars>0 import click from parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, dict_output,", "from parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, dict_output, _arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\",", "@click.argument(\"workflow_dict\", type=str) @pass_context @custom_exception @dict_output def cli(ctx, workflow_dict): \"\"\"Imports a new workflow given", "from parsec.decorators import custom_exception, dict_output, _arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str) @pass_context @custom_exception @dict_output def", "\"\"\"Imports a new workflow given a dictionary representing a previously exported workflow. Output:", "json_loads from parsec.decorators import custom_exception, dict_output, _arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str) @pass_context @custom_exception @dict_output", "pass_context, json_loads from parsec.decorators import custom_exception, dict_output, _arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str) @pass_context @custom_exception", "cli(ctx, workflow_dict): \"\"\"Imports a new workflow given a dictionary representing a previously exported", "parsec.decorators import custom_exception, dict_output, _arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str) @pass_context @custom_exception @dict_output def cli(ctx,", "@dict_output def cli(ctx, workflow_dict): \"\"\"Imports a new workflow given a dictionary representing a", "parsec.cli import pass_context, json_loads from parsec.decorators import custom_exception, dict_output, _arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str)", "new workflow given a dictionary representing a previously exported workflow. 
Output: \"\"\" return", "import custom_exception, dict_output, _arg_split @click.command('import_workflow_dict') @click.argument(\"workflow_dict\", type=str) @pass_context @custom_exception @dict_output def cli(ctx, workflow_dict):", "workflow given a dictionary representing a previously exported workflow. Output: \"\"\" return ctx.gi.workflows.import_workflow_dict(json_loads(workflow_dict))" ]
[ ":return: reduced expression in string form or input one if further reduction was", "associativity_type): if tkn == '>' and associativity_type == 'r': # because only in", "y: x + '^' + y, variables)) while True: try: x = binary_generator.__next__()", "Bool value of an expression Warning: function will only work on correct RNP", "is a expression that doesnt need them example: (expr1)|(a)|(expr2) will be evaluated to:", "if lz == 1: return w return False def reduce_(s): \"\"\" Main reduce", "x, y: x + y, onp) def generate_general_form(self, expression=''): \"\"\" Function generates general", "True return False def check_if_signs_are_correct(self, expression=''): \"\"\" Simple filter function that checks if", "'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0) \\ or ( is_associative(tkn, 'r') and", "sequence with values to be put in coresponding positions. Also string :return: Bool", "will not return any warnings in case of errors \"\"\" zipped_list = list(zip(get_variables(expression),", "and (self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0): onp.append(stack.pop()) continue break stack.append(tkn) elif tkn ==", "def expression_to_string(s): \"\"\" Helper function to change a reduced set to human-readable form", "'>' and associativity_type == 'r': # because only in case of > it", "where between two | there is a expression that doesnt need them example:", "= None while not x: x = input('') if x: print(reduce_logical_expression(x)) else: break", "calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x): incorrect_binaries.append(x) except: break if len(incorrect_binaries) > 0: return", "expression_list: if len(some) <= 4: # we are sure that there will be", "self.correctSigns]: return False state = True for single in expression: if state: if", "'T' return result2[:-1] def trim_expression(expression): \"\"\" Basic expression trimming :param expression: takes an", "def __init__(self, expression): self.general_form = '' 
self.correctSigns = '~^&|/>()TF' + ascii_lowercase self.expression =", "we are sure that there will be 2 brackets + we want 1", "\"\"\" variables = [] for variable in expression: if variable in ascii_lowercase and", "= list(map(lambda x: replace_mapping(zipped_list, x), expression)) operators = {'^': lambda x, y: bool(x)", "\"0\" + c yield \"1\" + c def find_value(zipped_list, x): for a, b", "(2, 2), '|': (2, 2), '/': (2, 2), '>': (1, 2)} # <operator>", "placed correctly :param expression: expression in String form :return: Bool result of brackets", "whether this expression is correct :param expression: Infix expression :return: RPN expression \"\"\"", "binary sequence :return: generator with binary sequence \"\"\" if n == 0: yield", "in :return: list with variables from expression \"\"\" variables = [] for variable", "in len(variables) where k is in range from 2 to len(variables). It checks", "reduced expression \"\"\" expression_list = expression.split('|') if len(expression_list) == 1: return trim_expression(expression_list[0]) reduced_expressions", "in (ascii_lowercase + 'TF'): state = False else: return False else: if single", "find_value(zipped_list, x) else: return x def get_variables(expression): \"\"\" Functions filters the expression for", "x, y: not bool(x) or bool(y)} stack = [] while len(expression) > 0:", "k is in range from 2 to len(variables). 
It checks whether it is", "return str.join('|', reduced_expressions) def reduce_logical_expression(expression): \"\"\" Main function that is responsible for driving", "with binary sequence \"\"\" if n == 0: yield \"\" else: for c", "'/': lambda x, y: not (bool(x) and bool(y)), '>': lambda x, y: not", "Basic expression trimming :param expression: takes an expression which in most cases matches", "in s: b1 = False for e2 in s: v = concat(e1, e2)", "self.expression n = len(get_variables(expression)) correct_binaries = [] generator = generate_binary(n) current_expression = self.convert_to_onp(expression)", "expression: string expression in form (expr1)|(expr2)|(expr3) :return: reduced expression \"\"\" expression_list = expression.split('|')", "= set() b2 = False for e1 in s: b1 = False for", "was not successful \"\"\" expression_list = list(expression) variables = get_variables(str.join('|', expression_list)) binary_generator =", "expression=''): \"\"\" Helper function to determine whether brackets are placed correctly :param expression:", "we can iterate over with binary sequence and '_' :return: Merged version of", "calculates a value of an expression in reverse polish notation :param expression: Expression", "find_value(zipped_list, x): for a, b in zipped_list: if a == x: return b", "y, variables)) while True: try: x = binary_generator.__next__() if calculate_onp(onp_expression, x) != calculate_onp(onp_xor,", "= generate_binary(len(variables)) incorrect_binaries = [] some_expression = Expression('') onp_expression = some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor", "them example: (expr1)|(a)|(expr2) will be evaluated to: (expr1)|a|(expr2) :param expression: string expression in", "expressions. 
We assume that they do not contain '|' since in this case", "current_expression = self.convert_to_onp(expression) while True: try: x = generator.__next__() if calculate_onp(current_expression, x): correct_binaries.append(x)", "expressions_list = expression.split('|') n = len(expressions_list) for a in range(2, n + 1):", "or single in ['(', ')']: # we want ~ # we ignore brackets", "place is being replaced by '_' \"\"\" w = \"\" lz = 0", "reduces unessesary brackets. It eliminates situations where between two | there is a", "= [] while len(expression) > 0: if expression[0] in ['0', '1']: stack.append(int(expression[0])) else:", "# we ignore brackets since they are already checked continue elif single in", "x) != calculate_onp(onp_xor, x): incorrect_binaries.append(x) except: break if len(incorrect_binaries) > 0: return str.join('|',", "responsible for driving program. It calls functions to check if expression is correct", "result.add(v) b1 = b2 = True if not b1: result.add(e1) if b2: return", "'(': stack.append(tkn) elif tkn == ')': while len(stack) > 0 and stack[-1] !=", "bindings: <operator> -> (priority,arguments_number) Also string with correct signs and expression itself \"\"\"", "Set with values :return: reduced set \"\"\" result = set() b2 = False", "= self.expression if not expression: return True if [x for x in expression", "and trims brackets :return: expression with trimmed brackets \"\"\" e = Expression('') while", "v: result.add(v) b1 = b2 = True if not b1: result.add(e1) if b2:", "from string import ascii_lowercase import functools from itertools import combinations def generate_binary(n): \"\"\"", "x, y: x + '^' + y, variables) + ')' def reduce_xor(expression): \"\"\"", "= len(expressions_list) for a in range(2, n + 1): for expr in combinations(expressions_list,", "= len(get_variables(expression)) correct_binaries = [] generator = generate_binary(n) current_expression = self.convert_to_onp(expression) while True:", "there will be 2 brackets + we want 1 
variable (or variable +", "and stack[-1] != '(': onp.append(stack.pop()) stack.pop() else: onp.append(tkn) while len(stack) > 0: onp.append(stack.pop())", "String expression to check :return: Bool result \"\"\" if not expression: expression =", "a String :return: String infix expression evaluated using QuineMcCluskey \"\"\" if not expression:", "functools from itertools import combinations def generate_binary(n): \"\"\" Function returns generator with binary", "\"\"\" if not expression: expression = self.expression brackets = 0 for a in", "generate_binary(n): \"\"\" Function returns generator with binary sequences of a set length :param", "'__main__': x = None while not x: x = input('') if x: print(reduce_logical_expression(x))", "[x for x in expression if x not in self.correctSigns]: return False state", "ascii_lowercase self.expression = expression.replace(' ', '') self.operators = {'~': (4, 1), '^': (3,", "string form or input one if further reduction was not possible \"\"\" expressions_list", "\"\"\" result2 = \"\" for e1 in s: result = \"\" for i", "expression def reduce_brackets(expression): \"\"\" Function that reduces unessesary brackets. It eliminates situations where", "we can iterate over with binary sequence and '_' :param s2: Sthing we", "and is semantically correct :param expression: String expression to be checked :return: Bool", "not successful \"\"\" expression_list = list(expression) variables = get_variables(str.join('|', expression_list)) binary_generator = generate_binary(len(variables))", "reduce_xor(functools.reduce(lambda x, y: '|' + x + y + '|', expressions_list)) return expression", "variable (or variable + negation) reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some) return str.join('|', reduced_expressions) def reduce_logical_expression(expression):", "binary sequence with values to be put in coresponding positions. 
Also string :return:", "def reduce_logical_expression(expression): \"\"\" Main function that is responsible for driving program. It calls", "from itertools import combinations def generate_binary(n): \"\"\" Function returns generator with binary sequences", "\"\"\" expression_list = expression.split('|') if len(expression_list) == 1: return trim_expression(expression_list[0]) reduced_expressions = []", "# <operator> -> (priority,arguments_number) def check_if_brackets_are_correct(self, expression=''): \"\"\" Helper function to determine whether", "expression_list = expression.split('|') if len(expression_list) == 1: return trim_expression(expression_list[0]) reduced_expressions = [] for", "return any warnings in case of errors \"\"\" zipped_list = list(zip(get_variables(expression), list(values))) expression", "correct :param expression: String expression to be checked :return: Bool result \"\"\" if", "stack.append(top) else: e1 = int(stack.pop()) e2 = int(stack.pop()) stack.append(operators[expression[0]](e2, e1)) del expression[0] return", "return 'T' return result2[:-1] def trim_expression(expression): \"\"\" Basic expression trimming :param expression: takes", "that is responsible for driving program. 
It calls functions to check if expression", "= str.join('|', expr) if len(reduced_sub_expression) < len(prev_expression): for var in list(expr): del expressions_list[expressions_list.index(var)]", "w return False def reduce_(s): \"\"\" Main reduce function :param s: Set with", "(priority,arguments_number) Also string with correct signs and expression itself \"\"\" def __init__(self, expression):", ":return: generator with binary sequence \"\"\" if n == 0: yield \"\" else:", "lz == 1: return w return False def reduce_(s): \"\"\" Main reduce function", "result[:-1] + ')|' if result2 == '()|': return 'T' return result2[:-1] def trim_expression(expression):", "\"\"\" expression_object = Expression(expression) if not expression_object.check_expression(): return 'ERROR' expression_in_general_form = expression_object.generate_general_form() expression_with_xor", "= generator.__next__() if calculate_onp(current_expression, x): correct_binaries.append(x) except: break set2 = reduce_(correct_binaries) self.general_form =", "interface for checking expression It calls methods to determine whether expression is correct", "= True elif single in ['(', ')']: continue else: return False return not", "(expression) and trims brackets :return: expression with trimmed brackets \"\"\" e = Expression('')", "ERROR if it is not correct \"\"\" expression_object = Expression(expression) if not expression_object.check_expression():", ":param values: binary sequence with values to be put in coresponding positions. 
Also", "in ['(', ')']: # we want ~ # we ignore brackets since they", "(3, 2), '&': (2, 2), '|': (2, 2), '/': (2, 2), '>': (1,", "\"&\" result2 += '(' + result[:-1] + ')|' if result2 == '()|': return", "s2): \"\"\" Helper function to reduce expressions :param s1: Sthing we can iterate", "set() b2 = False for e1 in s: b1 = False for e2", "x: return b return -1 def replace_mapping(zipped_list, x): if x == 'T': return", "-> (priority,arguments_number) def check_if_brackets_are_correct(self, expression=''): \"\"\" Helper function to determine whether brackets are", "= expression.split('|') if len(expression_list) == 1: return trim_expression(expression_list[0]) reduced_expressions = [] for some", "for a in expression: if a == '(': brackets += 1 elif a", "variable in ascii_lowercase and variable not in variables: variables.append(variable) return variables def calculate_onp(expression,", "reduced_sub_expression = reduce_tuple(expr) prev_expression = str.join('|', expr) if len(reduced_sub_expression) < len(prev_expression): for var", "y: x + '^' + y, variables) + ')' def reduce_xor(expression): \"\"\" Specific", "in s: result = \"\" for i in range(0, len(e1)): if e1[i] ==", "len(stack) > 0: onp.append(stack.pop()) return functools.reduce(lambda x, y: x + y, onp) def", "a tuple of string expressions :param expression: tuple containing expressions. 
We assume that", "in expression: if state: if single in self.operators and self.operators[single][1] == 1 or", "if len(incorrect_binaries) > 0: return str.join('|', expression_list) return '(' + functools.reduce(lambda x, y:", "if tkn == '>' and associativity_type == 'r': # because only in case", "any warnings in case of errors \"\"\" zipped_list = list(zip(get_variables(expression), list(values))) expression =", "are sure that there will be 2 brackets + we want 1 variable", "return expression def reduce_tuple(expression): \"\"\" Function reduces a tuple of string expressions :param", "expression=''): \"\"\" Simple filter function that checks if expression contains correct signs and", "expression as a String :return: String infix expression evaluated using QuineMcCluskey \"\"\" if", "if x not in self.correctSigns]: return False state = True for single in", "'~^&|/>()TF' + ascii_lowercase self.expression = expression.replace(' ', '') self.operators = {'~': (4, 1),", "only work on correct RNP expression and will not return any warnings in", "reduced set to human-readable form :param s: Set with values :return: String made", "break set2 = reduce_(correct_binaries) self.general_form = expression_to_string(set2) return self.general_form if __name__ == '__main__':", "Function returns generator with binary sequences of a set length :param n: length", "concat(s1, s2): \"\"\" Helper function to reduce expressions :param s1: Sthing we can", "in self.operators: if (is_associative(tkn, 'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0) \\ or", "<operator> -> (priority,arguments_number) Also string with correct signs and expression itself \"\"\" def", "(expression)|(expression)|(expression) or T (if expression is tautology) \"\"\" result2 = \"\" for e1", "function to reduce expressions :param s1: Sthing we can iterate over with binary", "input one if further reduction was not successful \"\"\" expression_list = list(expression) variables", "!= '(': 
onp.append(stack.pop()) stack.pop() else: onp.append(tkn) while len(stack) > 0: onp.append(stack.pop()) return functools.reduce(lambda", "two | there is a expression that doesnt need them example: (expr1)|(a)|(expr2) will", "bool(y), '/': lambda x, y: not (bool(x) and bool(y)), '>': lambda x, y:", "is correct semantically, in terms of brackets and signs :param expression: String expression", "== '__main__': x = None while not x: x = input('') if x:", "case of errors \"\"\" zipped_list = list(zip(get_variables(expression), list(values))) expression = list(map(lambda x: replace_mapping(zipped_list,", "True if not b1: result.add(e1) if b2: return reduce_(result) return result def expression_to_string(s):", "reduced_expressions = [] for some in expression_list: if len(some) <= 4: # we", "of expression operations. It contains map with bindings: <operator> -> (priority,arguments_number) Also string", "+ we want 1 variable (or variable + negation) reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some) return", "expression: String expression to check :return: Bool result \"\"\" if not expression: expression", "self.operators: if (is_associative(tkn, 'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0) \\ or (", "to: (expr1)|a|(expr2) :param expression: string expression in form (expr1)|(expr2)|(expr3) :return: reduced expression \"\"\"", "')|' if result2 == '()|': return 'T' return result2[:-1] def trim_expression(expression): \"\"\" Basic", "RPN Warning: it doesnt check whether this expression is correct :param expression: Infix", "\"\"\" Function that reduces unessesary brackets. 
It eliminates situations where between two |", "(1, 2)} # <operator> -> (priority,arguments_number) def check_if_brackets_are_correct(self, expression=''): \"\"\" Helper function to", "bool(x) or bool(y), '/': lambda x, y: not (bool(x) and bool(y)), '>': lambda", "Set with values :return: String made from input in pattern: (expression)|(expression)|(expression) or T", "brackets + we want 1 variable (or variable + negation) reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some)", "return expression def reduce_brackets(expression): \"\"\" Function that reduces unessesary brackets. It eliminates situations", "(expr1)|(expr2)|(expr3) :return: reduced expression \"\"\" expression_list = expression.split('|') if len(expression_list) == 1: return", "over with binary sequence and '_' :param s2: Sthing we can iterate over", "sequences of a set length :param n: length of a binary sequence :return:", "not the same as var1 xor var2 xor var3 etc :param expression: String", "if [x for x in expression if x not in self.correctSigns]: return False", "def reduce_tuple(expression): \"\"\" Function reduces a tuple of string expressions :param expression: tuple", "'|' since in this case they are a product of QuineMcCluskey algorithm :return:", "return e return reduce_brackets(expression) class Expression: \"\"\" Class designed to handle most of", "Function reduces a tuple of string expressions :param expression: tuple containing expressions. 
We", "It contains map with bindings: <operator> -> (priority,arguments_number) Also string with correct signs", "expression: Infix expression :return: RPN expression \"\"\" if not expression: expression = self.expression", "== 0: yield \"\" else: for c in generate_binary(n - 1): yield \"0\"", "expression \"\"\" expression_list = expression.split('|') if len(expression_list) == 1: return trim_expression(expression_list[0]) reduced_expressions =", "if expression[0] in ['0', '1']: stack.append(int(expression[0])) else: if expression[0] == '~': top =", "return False def check_if_signs_are_correct(self, expression=''): \"\"\" Simple filter function that checks if expression", "lz += 1 w += \"_\" if lz == 1: return w return", "Expression in RPN given as a string. :param values: binary sequence with values", "'0': result += '~' result += ascii_lowercase[i] + \"&\" result2 += '(' +", "most of expression operations. It contains map with bindings: <operator> -> (priority,arguments_number) Also", "n + 1): for expr in combinations(expressions_list, a): # i feel really bad", "a variable we mean any lower case character :param expression: expression to search", "Class designed to handle most of expression operations. 
It contains map with bindings:", "expressions :param s1: Sthing we can iterate over with binary sequence and '_'", "in string form or input one if further reduction was not possible \"\"\"", ":param expression: String expression to be reduced :return: reduced expression or ERROR if", "def reduce_(s): \"\"\" Main reduce function :param s: Set with values :return: reduced", "+= 1 elif a == ')': brackets -= 1 if brackets < 0:", "len(expressions_list) for a in range(2, n + 1): for expr in combinations(expressions_list, a):", "b2 = False for e1 in s: b1 = False for e2 in", "continue else: return False return not state def check_expression(self, expression=''): \"\"\" Higher level", "infix expression It uses QuineMcCluskey algorithm Result matches a pattern: (expression1)|(expression2)|(expression3)... :param expression:", "if e1[i] == '0': result += '~' result += ascii_lowercase[i] + \"&\" result2", "if not expression: expression = self.expression return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression) def convert_to_onp(self, expression=''):", "negation) reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some) return str.join('|', reduced_expressions) def reduce_logical_expression(expression): \"\"\" Main function that", "w += z1 else: lz += 1 w += \"_\" if lz ==", "x = None while not x: x = input('') if x: print(reduce_logical_expression(x)) else:", "reduce_brackets(expression): \"\"\" Function that reduces unessesary brackets. It eliminates situations where between two", "calls methods to determine whether expression is correct semantically, in terms of brackets", "function that is responsible for driving program. 
It calls functions to check if", "return True if [x for x in expression if x not in self.correctSigns]:", "if expression contains correct signs and is semantically correct :param expression: String expression", "and self.operators[single][1] == 2: # everything else than ~ state = True elif", "b1 = b2 = True if not b1: result.add(e1) if b2: return reduce_(result)", "expression operations. It contains map with bindings: <operator> -> (priority,arguments_number) Also string with", "in s: v = concat(e1, e2) if v: result.add(v) b1 = b2 =", "contains map with bindings: <operator> -> (priority,arguments_number) Also string with correct signs and", "x), expression)) operators = {'^': lambda x, y: bool(x) ^ bool(y), '&': lambda", "expression.split('|') if len(expression_list) == 1: return trim_expression(expression_list[0]) reduced_expressions = [] for some in", "str.join('|', expression_list) return '(' + functools.reduce(lambda x, y: x + '^' + y,", "c def find_value(zipped_list, x): for a, b in zipped_list: if a == x:", "expression: expression = self.expression if not expression: return True if [x for x", ":param s: Set with values :return: reduced set \"\"\" result = set() b2", "form from infix expression It uses QuineMcCluskey algorithm Result matches a pattern: (expression1)|(expression2)|(expression3)...", "brackets == 0: return True return False def check_if_signs_are_correct(self, expression=''): \"\"\" Simple filter", "expression or ERROR if it is not correct \"\"\" expression_object = Expression(expression) if", "(is_associative(tkn, 'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0) \\ or ( is_associative(tkn, 'r')", "expression[1:-1] return expression def reduce_tuple(expression): \"\"\" Function reduces a tuple of string expressions", "expression if x not in self.correctSigns]: return False state = True for single", "semantically, in terms of brackets and signs :param expression: String expression to check", "values): \"\"\" Function 
calculates a value of an expression in reverse polish notation", "+ y + '|', expressions_list)) return expression def reduce_brackets(expression): \"\"\" Function that reduces", "> 0: onp.append(stack.pop()) return functools.reduce(lambda x, y: x + y, onp) def generate_general_form(self,", "expression: tuple containing expressions. We assume that they do not contain '|' since", "tkn == '(': stack.append(tkn) elif tkn == ')': while len(stack) > 0 and", "+ functools.reduce(lambda x, y: x + '^' + y, variables) + ')' def", "It generates combinations of k elements in len(variables) where k is in range", "functools.reduce(lambda x, y: x + y, onp) def generate_general_form(self, expression=''): \"\"\" Function generates", "for this reduced_sub_expression = reduce_tuple(expr) prev_expression = str.join('|', expr) if len(reduced_sub_expression) < len(prev_expression):", "Warning: function will only work on correct RNP expression and will not return", "b1 = False for e2 in s: v = concat(e1, e2) if v:", "this expression is correct :param expression: Infix expression :return: RPN expression \"\"\" if", "is not the same as var1 xor var2 xor var3 etc :param expression:", "most cases matches a pattern: (expression) and trims brackets :return: expression with trimmed", "lambda x, y: bool(x) ^ bool(y), '&': lambda x, y: bool(x) and bool(y),", "stack[-1] in self.operators: if (is_associative(tkn, 'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0) \\", "x: replace_mapping(zipped_list, x), expression)) operators = {'^': lambda x, y: bool(x) ^ bool(y),", "in self.operators and self.operators[single][1] == 2: # everything else than ~ state =", "that it matches a pattern: (expr1)|(expr2)|(expr3) ... :return: reduced expression in string form", "range(2, n + 1): for expr in combinations(expressions_list, a): # i feel really", "form :param s: Set with values :return: String made from input in pattern:", "2 to len(variables). 
It checks whether it is not the same as var1", "and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0) \\ or ( is_associative(tkn, 'r') and (self.operators[tkn][0]", "brackets checking \"\"\" if not expression: expression = self.expression brackets = 0 for", "return variables def calculate_onp(expression, values): \"\"\" Function calculates a value of an expression", "self.operators and self.operators[single][1] == 2: # everything else than ~ state = True", "be put in coresponding positions. Also string :return: Bool value of an expression", "expression: String expression to be reduced. We assume that it matches a pattern:", "variable we mean any lower case character :param expression: expression to search in", "not expression_object.check_expression(): return 'ERROR' expression_in_general_form = expression_object.generate_general_form() expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor) <", "Also string with correct signs and expression itself \"\"\" def __init__(self, expression): self.general_form", "can iterate over with binary sequence and '_' :return: Merged version of input,", "with correct signs and expression itself \"\"\" def __init__(self, expression): self.general_form = ''", "single in expression: if state: if single in self.operators and self.operators[single][1] == 1", "onp_expression = some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y: x + '^' +", "(2, 2), '>': (1, 2)} # <operator> -> (priority,arguments_number) def check_if_brackets_are_correct(self, expression=''): \"\"\"", "we want ~ # we ignore brackets since they are already checked continue", "with binary sequence and '_' :return: Merged version of input, when certain bits", "def replace_mapping(zipped_list, x): if x == 'T': return 1 elif x == 'F':", "while True: try: x = generator.__next__() if calculate_onp(current_expression, x): 
correct_binaries.append(x) except: break set2", "True elif single in ['(', ')']: continue else: return False return not state", "expression to be checked :return: Bool result \"\"\" if not expression: expression =", "operators = {'^': lambda x, y: bool(x) ^ bool(y), '&': lambda x, y:", "return '(' + functools.reduce(lambda x, y: x + '^' + y, variables) +", "not expression: expression = self.expression if not expression: return True if [x for", "case they are a product of QuineMcCluskey algorithm :return: String containing reduced expression", "which in most cases matches a pattern: (expression) and trims brackets :return: expression", "\"\"\" Specific function to reduce xor expressions. It generates combinations of k elements", "y: not bool(x) or bool(y)} stack = [] while len(expression) > 0: if", "expression in reverse polish notation :param expression: Expression in RPN given as a", "filters the expression for variables and returns them As a variable we mean", "if it is not correct \"\"\" expression_object = Expression(expression) if not expression_object.check_expression(): return", "if not b1: result.add(e1) if b2: return reduce_(result) return result def expression_to_string(s): \"\"\"", "return result2[:-1] def trim_expression(expression): \"\"\" Basic expression trimming :param expression: takes an expression", "single in self.operators and self.operators[single][1] == 2: # everything else than ~ state", "x + y, onp) def generate_general_form(self, expression=''): \"\"\" Function generates general form from", "and expression[-1] == ')' and e.check_expression(expression): expression = expression[1:-1] return expression def reduce_tuple(expression):", "var3 etc :param expression: String expression to be reduced. 
We assume that it", "bool(stack.pop()) stack.append(top) else: e1 = int(stack.pop()) e2 = int(stack.pop()) stack.append(operators[expression[0]](e2, e1)) del expression[0]", "len(stack) > 0 and stack[-1] in self.operators: if (is_associative(tkn, 'l') and (self.operators[tkn][0] -", "reduce_brackets(expression) class Expression: \"\"\" Class designed to handle most of expression operations. It", "var2 xor var3 etc :param expression: String expression to be reduced. We assume", "for expr in combinations(expressions_list, a): # i feel really bad for this reduced_sub_expression", "for x in expression if x not in self.correctSigns]: return False state =", "len(expression) > 2 and expression[0] == '(' and expression[-1] == ')' and e.check_expression(expression):", "return 0 elif x in (ascii_lowercase + 'TF'): return find_value(zipped_list, x) else: return", "when certain bits are different this place is being replaced by '_' \"\"\"", "> 0: return str.join('|', expression_list) return '(' + functools.reduce(lambda x, y: x +", "one if further reduction was not successful \"\"\" expression_list = list(expression) variables =", "= self.expression brackets = 0 for a in expression: if a == '(':", "set2 = reduce_(correct_binaries) self.general_form = expression_to_string(set2) return self.general_form if __name__ == '__main__': x", "== '()|': return 'T' return result2[:-1] def trim_expression(expression): \"\"\" Basic expression trimming :param", "w += \"_\" if lz == 1: return w return False def reduce_(s):", "# because only in case of > it matters. return False return True", "else: return False else: if single in self.operators and self.operators[single][1] == 2: #", "\"\"\" if not expression: expression = self.expression if not expression: return True if", "be reduced. We assume that it matches a pattern: (expr1)|(expr2)|(expr3) ... 
:return: reduced", "infix expression to RPN Warning: it doesnt check whether this expression is correct", "a in expression: if a == '(': brackets += 1 elif a ==", "s: b1 = False for e2 in s: v = concat(e1, e2) if", "to be reduced. We assume that it matches a pattern: (expr1)|(expr2)|(expr3) ... :return:", "It checks whether it is not the same as var1 xor var2 xor", "state: if single in self.operators and self.operators[single][1] == 1 or single in ['(',", "+ '^' + y, variables)) while True: try: x = binary_generator.__next__() if calculate_onp(onp_expression,", "bool(y), '&': lambda x, y: bool(x) and bool(y), '|': lambda x, y: bool(x)", "2), '&': (2, 2), '|': (2, 2), '/': (2, 2), '>': (1, 2)}", "if not expression: return True if [x for x in expression if x", "state = True elif single in ['(', ')']: continue else: return False return", "def check_expression(self, expression=''): \"\"\" Higher level interface for checking expression It calls methods", "\"\"\" Main reduce function :param s: Set with values :return: reduced set \"\"\"", "and returns them As a variable we mean any lower case character :param", "= True for single in expression: if state: if single in self.operators and", "Merged version of input, when certain bits are different this place is being", "expression in form (expr1)|(expr2)|(expr3) :return: reduced expression \"\"\" expression_list = expression.split('|') if len(expression_list)", "= {'~': (4, 1), '^': (3, 2), '&': (2, 2), '|': (2, 2),", "\"\"\" Higher level interface for checking expression It calls methods to determine whether", "expression which in most cases matches a pattern: (expression) and trims brackets :return:", "reduce_(correct_binaries) self.general_form = expression_to_string(set2) return self.general_form if __name__ == '__main__': x = None", "iterate over with binary sequence and '_' :param s2: Sthing we can iterate", "is not correct \"\"\" expression_object = Expression(expression) if not expression_object.check_expression(): 
return 'ERROR' expression_in_general_form", "+ y, variables)) while True: try: x = binary_generator.__next__() if calculate_onp(onp_expression, x) !=", "for checking expression It calls methods to determine whether expression is correct semantically,", "> 2 and expression[0] == '(' and expression[-1] == ')' and e.check_expression(expression): expression", "and variable not in variables: variables.append(variable) return variables def calculate_onp(expression, values): \"\"\" Function", "self.general_form = expression_to_string(set2) return self.general_form if __name__ == '__main__': x = None while", "reduced_expressions.append(some) return str.join('|', reduced_expressions) def reduce_logical_expression(expression): \"\"\" Main function that is responsible for", "k elements in len(variables) where k is in range from 2 to len(variables).", "expression trimming :param expression: takes an expression which in most cases matches a", "and bool(y)), '>': lambda x, y: not bool(x) or bool(y)} stack = []", "e1 in s: result = \"\" for i in range(0, len(e1)): if e1[i]", "example: (expr1)|(a)|(expr2) will be evaluated to: (expr1)|a|(expr2) :param expression: string expression in form", "pattern: (expr1)|(expr2)|(expr3) ... :return: reduced expression in string form or input one if", "xor var3 etc :param expression: String expression to be reduced. We assume that", "brackets. 
It eliminates situations where between two | there is a expression that", "length of a binary sequence :return: generator with binary sequence \"\"\" if n", ":param expression: string expression in form (expr1)|(expr2)|(expr3) :return: reduced expression \"\"\" expression_list =", "character :param expression: expression to search in :return: list with variables from expression", "warnings in case of errors \"\"\" zipped_list = list(zip(get_variables(expression), list(values))) expression = list(map(lambda", "an expression which in most cases matches a pattern: (expression) and trims brackets", "set \"\"\" result = set() b2 = False for e1 in s: b1", "0: return str.join('|', expression_list) return '(' + functools.reduce(lambda x, y: x + '^'", "itertools import combinations def generate_binary(n): \"\"\" Function returns generator with binary sequences of", "stack[0] def is_associative(tkn, associativity_type): if tkn == '>' and associativity_type == 'r': #", "of QuineMcCluskey algorithm :return: String containing reduced expression or the input one if", "while len(stack) > 0 and stack[-1] != '(': onp.append(stack.pop()) stack.pop() else: onp.append(tkn) while", "else: lz += 1 w += \"_\" if lz == 1: return w", "expressions_list)) return expression def reduce_brackets(expression): \"\"\" Function that reduces unessesary brackets. 
It eliminates", "expression: Infix expression as a String :return: String infix expression evaluated using QuineMcCluskey", ":param expression: String expression to check :return: Bool result \"\"\" if not expression:", "to be reduced :return: reduced expression or ERROR if it is not correct", "0: return True return False def check_if_signs_are_correct(self, expression=''): \"\"\" Simple filter function that", "expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda x, y: '|' + x + y + '|', expressions_list))", "return trim_expression(expression_list[0]) reduced_expressions = [] for some in expression_list: if len(some) <= 4:", "a set length :param n: length of a binary sequence :return: generator with", "prev_expression = str.join('|', expr) if len(reduced_sub_expression) < len(prev_expression): for var in list(expr): del", "to handle most of expression operations. It contains map with bindings: <operator> ->", "Main reduce function :param s: Set with values :return: reduced set \"\"\" result", "if (is_associative(tkn, 'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0) \\ or ( is_associative(tkn,", "in range(2, n + 1): for expr in combinations(expressions_list, a): # i feel", "0: return False if brackets == 0: return True return False def check_if_signs_are_correct(self,", "expression: return True if [x for x in expression if x not in", "- 1): yield \"0\" + c yield \"1\" + c def find_value(zipped_list, x):", "as a String :return: String infix expression evaluated using QuineMcCluskey \"\"\" if not", "s2): if z1 == z2: w += z1 else: lz += 1 w", "String expression to be checked :return: Bool result \"\"\" if not expression: expression", "mean any lower case character :param expression: expression to search in :return: list", "generator.__next__() if calculate_onp(current_expression, x): correct_binaries.append(x) except: break set2 = reduce_(correct_binaries) self.general_form = expression_to_string(set2)", "')' 
def reduce_xor(expression): \"\"\" Specific function to reduce xor expressions. It generates combinations", "expression to be reduced :return: reduced expression or ERROR if it is not", "onp) def generate_general_form(self, expression=''): \"\"\" Function generates general form from infix expression It", "= '' self.correctSigns = '~^&|/>()TF' + ascii_lowercase self.expression = expression.replace(' ', '') self.operators", "\"\"\" Class designed to handle most of expression operations. It contains map with", "lambda x, y: not (bool(x) and bool(y)), '>': lambda x, y: not bool(x)", "reduce_tuple(expr) prev_expression = str.join('|', expr) if len(reduced_sub_expression) < len(prev_expression): for var in list(expr):", "human-readable form :param s: Set with values :return: String made from input in", "0 and stack[-1] in self.operators: if (is_associative(tkn, 'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <=", "bool(x) and bool(y), '|': lambda x, y: bool(x) or bool(y), '/': lambda x,", "\"\"\" Function returns generator with binary sequences of a set length :param n:", "RNP expression and will not return any warnings in case of errors \"\"\"", "(priority,arguments_number) def check_if_brackets_are_correct(self, expression=''): \"\"\" Helper function to determine whether brackets are placed", "evaluated using QuineMcCluskey \"\"\" if not expression: expression = self.expression n = len(get_variables(expression))", "0: onp.append(stack.pop()) return functools.reduce(lambda x, y: x + y, onp) def generate_general_form(self, expression=''):", "We assume that it matches a pattern: (expr1)|(expr2)|(expr3) ... 
:return: reduced expression in", "semantically correct :param expression: String expression to be checked :return: Bool result \"\"\"", "+= z1 else: lz += 1 w += \"_\" if lz == 1:", "4: # we are sure that there will be 2 brackets + we", "of an expression Warning: function will only work on correct RNP expression and", "elif tkn == ')': while len(stack) > 0 and stack[-1] != '(': onp.append(stack.pop())", "== ')': brackets -= 1 if brackets < 0: return False if brackets", "< 0): onp.append(stack.pop()) continue break stack.append(tkn) elif tkn == '(': stack.append(tkn) elif tkn", "stack.append(tkn) elif tkn == ')': while len(stack) > 0 and stack[-1] != '(':", "expression to check :return: Bool result \"\"\" if not expression: expression = self.expression", "variable in expression: if variable in ascii_lowercase and variable not in variables: variables.append(variable)", "expressions. It generates combinations of k elements in len(variables) where k is in", "onp.append(stack.pop()) continue break stack.append(tkn) elif tkn == '(': stack.append(tkn) elif tkn == ')':", "is semantically correct :param expression: String expression to be checked :return: Bool result", "expr in combinations(expressions_list, a): # i feel really bad for this reduced_sub_expression =", "only in case of > it matters. 
return False return True def concat(s1,", "< len(expression): return e return reduce_brackets(expression) class Expression: \"\"\" Class designed to handle", "b return -1 def replace_mapping(zipped_list, x): if x == 'T': return 1 elif", "Helper function to change a reduced set to human-readable form :param s: Set", "and associativity_type == 'r': # because only in case of > it matters.", "expression=''): \"\"\" Function generates general form from infix expression It uses QuineMcCluskey algorithm", "x == 'T': return 1 elif x == 'F': return 0 elif x", "expression: expression = self.expression n = len(get_variables(expression)) correct_binaries = [] generator = generate_binary(n)", "return functools.reduce(lambda x, y: x + y, onp) def generate_general_form(self, expression=''): \"\"\" Function", "between two | there is a expression that doesnt need them example: (expr1)|(a)|(expr2)", "s: v = concat(e1, e2) if v: result.add(v) b1 = b2 = True", "z1, z2 in zip(s1, s2): if z1 == z2: w += z1 else:", "successful \"\"\" expression_list = list(expression) variables = get_variables(str.join('|', expression_list)) binary_generator = generate_binary(len(variables)) incorrect_binaries", "over with binary sequence and '_' :return: Merged version of input, when certain", "tuple containing expressions. We assume that they do not contain '|' since in", "that there will be 2 brackets + we want 1 variable (or variable", "certain bits are different this place is being replaced by '_' \"\"\" w", "Helper function to determine whether brackets are placed correctly :param expression: expression in", "and expression[0] == '(' and expression[-1] == ')' and e.check_expression(expression): expression = expression[1:-1]", "get_variables(expression): \"\"\" Functions filters the expression for variables and returns them As a", "functions to check if expression is correct and then reduces expression :param expression:", ":param expression: Expression in RPN given as a string. 
:param values: binary sequence", "convert_to_onp(self, expression=''): \"\"\" Function converts an infix expression to RPN Warning: it doesnt", "String :return: String infix expression evaluated using QuineMcCluskey \"\"\" if not expression: expression", "variables from expression \"\"\" variables = [] for variable in expression: if variable", "a binary sequence :return: generator with binary sequence \"\"\" if n == 0:", "len(expression): return expression_with_xor e = reduce_brackets(expression_in_general_form) if len(e) < len(expression): return e return", "== 0: return True return False def check_if_signs_are_correct(self, expression=''): \"\"\" Simple filter function", "it is not the same as var1 xor var2 xor var3 etc :param", "expression: expression to search in :return: list with variables from expression \"\"\" variables", "\"\"\" e = Expression('') while len(expression) > 2 and expression[0] == '(' and", "del expression[0] return stack[0] def is_associative(tkn, associativity_type): if tkn == '>' and associativity_type", "a reduced set to human-readable form :param s: Set with values :return: String", "It eliminates situations where between two | there is a expression that doesnt", "b2: return reduce_(result) return result def expression_to_string(s): \"\"\" Helper function to change a", ":param s2: Sthing we can iterate over with binary sequence and '_' :return:", "expression: expression = self.expression stack = [] onp = [] for tkn in", "False return True def concat(s1, s2): \"\"\" Helper function to reduce expressions :param", "== ')' and e.check_expression(expression): expression = expression[1:-1] return expression def reduce_tuple(expression): \"\"\" Function", "because only in case of > it matters. 
return False return True def", "\"\" for e1 in s: result = \"\" for i in range(0, len(e1)):", "is being replaced by '_' \"\"\" w = \"\" lz = 0 for", "(or variable + negation) reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some) return str.join('|', reduced_expressions) def reduce_logical_expression(expression): \"\"\"", "def is_associative(tkn, associativity_type): if tkn == '>' and associativity_type == 'r': # because", "result \"\"\" if not expression: expression = self.expression if not expression: return True", "= generate_binary(n) current_expression = self.convert_to_onp(expression) while True: try: x = generator.__next__() if calculate_onp(current_expression,", "!= calculate_onp(onp_xor, x): incorrect_binaries.append(x) except: break if len(incorrect_binaries) > 0: return str.join('|', expression_list)", "s1: Sthing we can iterate over with binary sequence and '_' :param s2:", "(self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0): onp.append(stack.pop()) continue break stack.append(tkn) elif tkn == '(':", "y: not (bool(x) and bool(y)), '>': lambda x, y: not bool(x) or bool(y)}", "ascii_lowercase import functools from itertools import combinations def generate_binary(n): \"\"\" Function returns generator", "String containing reduced expression or the input one if further reduction was not", "len(prev_expression): for var in list(expr): del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda x, y: '|'", "'~': top = not bool(stack.pop()) stack.append(top) else: e1 = int(stack.pop()) e2 = int(stack.pop())", "or ( is_associative(tkn, 'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0): onp.append(stack.pop()) continue break", "1: return trim_expression(expression_list[0]) reduced_expressions = [] for some in expression_list: if len(some) <=", "x, y: bool(x) ^ bool(y), '&': lambda x, y: bool(x) and bool(y), '|':", 
":param expression: Infix expression as a String :return: String infix expression evaluated using", "e1 in s: b1 = False for e2 in s: v = concat(e1,", "expression = self.expression return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression) def convert_to_onp(self, expression=''): \"\"\" Function converts", "input one if further reduction was not possible \"\"\" expressions_list = expression.split('|') n", "0 for a in expression: if a == '(': brackets += 1 elif", "can iterate over with binary sequence and '_' :param s2: Sthing we can", "check_expression(self, expression=''): \"\"\" Higher level interface for checking expression It calls methods to", "== z2: w += z1 else: lz += 1 w += \"_\" if", "{'~': (4, 1), '^': (3, 2), '&': (2, 2), '|': (2, 2), '/':", "in combinations(expressions_list, a): # i feel really bad for this reduced_sub_expression = reduce_tuple(expr)", "Expression('') while len(expression) > 2 and expression[0] == '(' and expression[-1] == ')'", "is correct and then reduces expression :param expression: String expression to be reduced", "{'^': lambda x, y: bool(x) ^ bool(y), '&': lambda x, y: bool(x) and", "# i feel really bad for this reduced_sub_expression = reduce_tuple(expr) prev_expression = str.join('|',", "function to reduce xor expressions. It generates combinations of k elements in len(variables)", ":return: list with variables from expression \"\"\" variables = [] for variable in", "Result matches a pattern: (expression1)|(expression2)|(expression3)... 
:param expression: Infix expression as a String :return:", "are already checked continue elif single in (ascii_lowercase + 'TF'): state = False", "return False state = True for single in expression: if state: if single", "len(stack) > 0 and stack[-1] != '(': onp.append(stack.pop()) stack.pop() else: onp.append(tkn) while len(stack)", "expression \"\"\" variables = [] for variable in expression: if variable in ascii_lowercase", "if b2: return reduce_(result) return result def expression_to_string(s): \"\"\" Helper function to change", ":param expression: expression in String form :return: Bool result of brackets checking \"\"\"", "= 0 for z1, z2 in zip(s1, s2): if z1 == z2: w", "x + '^' + y, variables) + ')' def reduce_xor(expression): \"\"\" Specific function", "if brackets < 0: return False if brackets == 0: return True return", "is_associative(tkn, 'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0): onp.append(stack.pop()) continue break stack.append(tkn) elif", "reduce_logical_expression(expression): \"\"\" Main function that is responsible for driving program. It calls functions", "\"\"\" if not expression: expression = self.expression n = len(get_variables(expression)) correct_binaries = []", "import functools from itertools import combinations def generate_binary(n): \"\"\" Function returns generator with", "if x == 'T': return 1 elif x == 'F': return 0 elif", "s2: Sthing we can iterate over with binary sequence and '_' :return: Merged", "reduces a tuple of string expressions :param expression: tuple containing expressions. 
We assume", "')': brackets -= 1 if brackets < 0: return False if brackets ==", "<operator> -> (priority,arguments_number) def check_if_brackets_are_correct(self, expression=''): \"\"\" Helper function to determine whether brackets", "expression_object.check_expression(): return 'ERROR' expression_in_general_form = expression_object.generate_general_form() expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor) < len(expression):", "binary sequences of a set length :param n: length of a binary sequence", "[] some_expression = Expression('') onp_expression = some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y:", "operations. It contains map with bindings: <operator> -> (priority,arguments_number) Also string with correct", "this reduced_sub_expression = reduce_tuple(expr) prev_expression = str.join('|', expr) if len(reduced_sub_expression) < len(prev_expression): for", "'^': (3, 2), '&': (2, 2), '|': (2, 2), '/': (2, 2), '>':", "returns generator with binary sequences of a set length :param n: length of", "== '(': brackets += 1 elif a == ')': brackets -= 1 if", "False for e2 in s: v = concat(e1, e2) if v: result.add(v) b1", "x + '^' + y, variables)) while True: try: x = binary_generator.__next__() if", "False def reduce_(s): \"\"\" Main reduce function :param s: Set with values :return:", "variable + negation) reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some) return str.join('|', reduced_expressions) def reduce_logical_expression(expression): \"\"\" Main", "incorrect_binaries = [] some_expression = Expression('') onp_expression = some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor = some_expression.convert_to_onp(functools.reduce(lambda", "expression and will not return any warnings in case of errors \"\"\" zipped_list", "for variable in expression: if variable in ascii_lowercase 
and variable not in variables:", "e return reduce_brackets(expression) class Expression: \"\"\" Class designed to handle most of expression", "1), '^': (3, 2), '&': (2, 2), '|': (2, 2), '/': (2, 2),", "to search in :return: list with variables from expression \"\"\" variables = []", "'&': (2, 2), '|': (2, 2), '/': (2, 2), '>': (1, 2)} #", "z1 == z2: w += z1 else: lz += 1 w += \"_\"", "reduced. We assume that it matches a pattern: (expr1)|(expr2)|(expr3) ... :return: reduced expression", "[] generator = generate_binary(n) current_expression = self.convert_to_onp(expression) while True: try: x = generator.__next__()", "Expression(expression) if not expression_object.check_expression(): return 'ERROR' expression_in_general_form = expression_object.generate_general_form() expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form)) if", "in reverse polish notation :param expression: Expression in RPN given as a string.", "expression :param expression: String expression to be reduced :return: reduced expression or ERROR", "reduced set \"\"\" result = set() b2 = False for e1 in s:", "'|' + x + y + '|', expressions_list)) return expression def reduce_brackets(expression): \"\"\"", "put in coresponding positions. Also string :return: Bool value of an expression Warning:", "assume that they do not contain '|' since in this case they are", "not return any warnings in case of errors \"\"\" zipped_list = list(zip(get_variables(expression), list(values)))", "x): for a, b in zipped_list: if a == x: return b return", "x in (ascii_lowercase + 'TF'): return find_value(zipped_list, x) else: return x def get_variables(expression):", "pattern: (expression1)|(expression2)|(expression3)... 
:param expression: Infix expression as a String :return: String infix expression", "True: try: x = generator.__next__() if calculate_onp(current_expression, x): correct_binaries.append(x) except: break set2 =", "will only work on correct RNP expression and will not return any warnings", "+ ascii_lowercase self.expression = expression.replace(' ', '') self.operators = {'~': (4, 1), '^':", "expression to search in :return: list with variables from expression \"\"\" variables =", "reduce function :param s: Set with values :return: reduced set \"\"\" result =", "not correct \"\"\" expression_object = Expression(expression) if not expression_object.check_expression(): return 'ERROR' expression_in_general_form =", "self.operators[single][1] == 2: # everything else than ~ state = True elif single", "to reduce expressions :param s1: Sthing we can iterate over with binary sequence", "generate_binary(n - 1): yield \"0\" + c yield \"1\" + c def find_value(zipped_list,", "expression: if variable in ascii_lowercase and variable not in variables: variables.append(variable) return variables", "class Expression: \"\"\" Class designed to handle most of expression operations. It contains", "~ # we ignore brackets since they are already checked continue elif single", "in coresponding positions. Also string :return: Bool value of an expression Warning: function", "or bool(y), '/': lambda x, y: not (bool(x) and bool(y)), '>': lambda x,", "== 1 or single in ['(', ')']: # we want ~ # we", "result def expression_to_string(s): \"\"\" Helper function to change a reduced set to human-readable", "(self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0) \\ or ( is_associative(tkn, 'r') and (self.operators[tkn][0] -", "if e1[i] == '_': continue if e1[i] == '0': result += '~' result", "a pattern: (expr1)|(expr2)|(expr3) ... 
:return: reduced expression in string form or input one", "if not expression: expression = self.expression if not expression: return True if [x", "= list(expression) variables = get_variables(str.join('|', expression_list)) binary_generator = generate_binary(len(variables)) incorrect_binaries = [] some_expression", "x): correct_binaries.append(x) except: break set2 = reduce_(correct_binaries) self.general_form = expression_to_string(set2) return self.general_form if", "self.operators = {'~': (4, 1), '^': (3, 2), '&': (2, 2), '|': (2,", "continue if e1[i] == '0': result += '~' result += ascii_lowercase[i] + \"&\"", "expression It calls methods to determine whether expression is correct semantically, in terms", "brackets < 0: return False if brackets == 0: return True return False", "e1)) del expression[0] return stack[0] def is_associative(tkn, associativity_type): if tkn == '>' and", "+ 'TF'): state = False else: return False else: if single in self.operators", "\"1\" + c def find_value(zipped_list, x): for a, b in zipped_list: if a", "return True return False def check_if_signs_are_correct(self, expression=''): \"\"\" Simple filter function that checks", "expression is correct and then reduces expression :param expression: String expression to be", "elif tkn == '(': stack.append(tkn) elif tkn == ')': while len(stack) > 0", "= self.convert_to_onp(expression) while True: try: x = generator.__next__() if calculate_onp(current_expression, x): correct_binaries.append(x) except:", "1 w += \"_\" if lz == 1: return w return False def", ":return: Bool result \"\"\" if not expression: expression = self.expression if not expression:", "check_if_signs_are_correct(self, expression=''): \"\"\" Simple filter function that checks if expression contains correct signs", "-= 1 if brackets < 0: return False if brackets == 0: return", "-> (priority,arguments_number) Also string with correct signs and expression itself \"\"\" def __init__(self,", "- self.operators[stack[-1]][0]) <= 0) 
\\ or ( is_associative(tkn, 'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0])", "generate_binary(n) current_expression = self.convert_to_onp(expression) while True: try: x = generator.__next__() if calculate_onp(current_expression, x):", "reduce_tuple(expression): \"\"\" Function reduces a tuple of string expressions :param expression: tuple containing", "for var in list(expr): del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda x, y: '|' +", "expression is tautology) \"\"\" result2 = \"\" for e1 in s: result =", "single in ['(', ')']: continue else: return False return not state def check_expression(self,", ":param expression: String expression to be checked :return: Bool result \"\"\" if not", "that they do not contain '|' since in this case they are a", "while len(stack) > 0 and stack[-1] in self.operators: if (is_associative(tkn, 'l') and (self.operators[tkn][0]", "search in :return: list with variables from expression \"\"\" variables = [] for", "', '') self.operators = {'~': (4, 1), '^': (3, 2), '&': (2, 2),", "= Expression('') onp_expression = some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y: x +", "xor expressions. 
It generates combinations of k elements in len(variables) where k is", "list with variables from expression \"\"\" variables = [] for variable in expression:", "1): for expr in combinations(expressions_list, a): # i feel really bad for this", "from input in pattern: (expression)|(expression)|(expression) or T (if expression is tautology) \"\"\" result2", "values :return: reduced set \"\"\" result = set() b2 = False for e1", "result += '~' result += ascii_lowercase[i] + \"&\" result2 += '(' + result[:-1]", "the expression for variables and returns them As a variable we mean any", "(expr1)|a|(expr2) :param expression: string expression in form (expr1)|(expr2)|(expr3) :return: reduced expression \"\"\" expression_list", "not bool(x) or bool(y)} stack = [] while len(expression) > 0: if expression[0]", ":return: Bool result of brackets checking \"\"\" if not expression: expression = self.expression", "single in (ascii_lowercase + 'TF'): state = False else: return False else: if", "range(0, len(e1)): if e1[i] == '_': continue if e1[i] == '0': result +=", "variables and returns them As a variable we mean any lower case character", "whether brackets are placed correctly :param expression: expression in String form :return: Bool", "the same as var1 xor var2 xor var3 etc :param expression: String expression", "0 for z1, z2 in zip(s1, s2): if z1 == z2: w +=", "brackets += 1 elif a == ')': brackets -= 1 if brackets <", "\\ or ( is_associative(tkn, 'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0): onp.append(stack.pop()) continue", "self.expression stack = [] onp = [] for tkn in expression: if tkn", "values: binary sequence with values to be put in coresponding positions. 
Also string", "self.expression return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression) def convert_to_onp(self, expression=''): \"\"\" Function converts an infix", "x def get_variables(expression): \"\"\" Functions filters the expression for variables and returns them", "lower case character :param expression: expression to search in :return: list with variables", "return -1 def replace_mapping(zipped_list, x): if x == 'T': return 1 elif x", "list(expr): del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda x, y: '|' + x + y", "(expression1)|(expression2)|(expression3)... :param expression: Infix expression as a String :return: String infix expression evaluated", "- self.operators[stack[-1]][0]) < 0): onp.append(stack.pop()) continue break stack.append(tkn) elif tkn == '(': stack.append(tkn)", "expression_object = Expression(expression) if not expression_object.check_expression(): return 'ERROR' expression_in_general_form = expression_object.generate_general_form() expression_with_xor =", ":param expression: expression to search in :return: list with variables from expression \"\"\"", "be reduced :return: reduced expression or ERROR if it is not correct \"\"\"", "__name__ == '__main__': x = None while not x: x = input('') if", "else: e1 = int(stack.pop()) e2 = int(stack.pop()) stack.append(operators[expression[0]](e2, e1)) del expression[0] return stack[0]", "not expression: expression = self.expression stack = [] onp = [] for tkn", "s: Set with values :return: String made from input in pattern: (expression)|(expression)|(expression) or", "list(map(lambda x: replace_mapping(zipped_list, x), expression)) operators = {'^': lambda x, y: bool(x) ^", "(2, 2), '/': (2, 2), '>': (1, 2)} # <operator> -> (priority,arguments_number) def", "try: x = generator.__next__() if calculate_onp(current_expression, x): correct_binaries.append(x) except: 
break set2 = reduce_(correct_binaries)", ":return: expression with trimmed brackets \"\"\" e = Expression('') while len(expression) > 2", "combinations of k elements in len(variables) where k is in range from 2", "if variable in ascii_lowercase and variable not in variables: variables.append(variable) return variables def", "binary sequence and '_' :param s2: Sthing we can iterate over with binary", "sequence and '_' :return: Merged version of input, when certain bits are different", "correct_binaries = [] generator = generate_binary(n) current_expression = self.convert_to_onp(expression) while True: try: x", "Function generates general form from infix expression It uses QuineMcCluskey algorithm Result matches", "n: length of a binary sequence :return: generator with binary sequence \"\"\" if", "yield \"0\" + c yield \"1\" + c def find_value(zipped_list, x): for a,", "reduce_xor(expression): \"\"\" Specific function to reduce xor expressions. It generates combinations of k", "\"\"\" if not expression: expression = self.expression stack = [] onp = []", "if further reduction was not possible \"\"\" expressions_list = expression.split('|') n = len(expressions_list)", "y: bool(x) ^ bool(y), '&': lambda x, y: bool(x) and bool(y), '|': lambda", "'_' :param s2: Sthing we can iterate over with binary sequence and '_'", "\"\" lz = 0 for z1, z2 in zip(s1, s2): if z1 ==", "being replaced by '_' \"\"\" w = \"\" lz = 0 for z1,", "need them example: (expr1)|(a)|(expr2) will be evaluated to: (expr1)|a|(expr2) :param expression: string expression", "bool(x) or bool(y)} stack = [] while len(expression) > 0: if expression[0] in", "expression: expression in String form :return: Bool result of brackets checking \"\"\" if", "QuineMcCluskey algorithm Result matches a pattern: (expression1)|(expression2)|(expression3)... 
:param expression: Infix expression as a", "return False return True def concat(s1, s2): \"\"\" Helper function to reduce expressions", "len(expression): return e return reduce_brackets(expression) class Expression: \"\"\" Class designed to handle most", "<= 4: # we are sure that there will be 2 brackets +", "Warning: it doesnt check whether this expression is correct :param expression: Infix expression", "break if len(incorrect_binaries) > 0: return str.join('|', expression_list) return '(' + functools.reduce(lambda x,", "onp.append(stack.pop()) stack.pop() else: onp.append(tkn) while len(stack) > 0: onp.append(stack.pop()) return functools.reduce(lambda x, y:", "as a string. :param values: binary sequence with values to be put in", "w = \"\" lz = 0 for z1, z2 in zip(s1, s2): if", "variables = [] for variable in expression: if variable in ascii_lowercase and variable", "2), '>': (1, 2)} # <operator> -> (priority,arguments_number) def check_if_brackets_are_correct(self, expression=''): \"\"\" Helper", "expression for variables and returns them As a variable we mean any lower", "var in list(expr): del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda x, y: '|' + x", "Function that reduces unessesary brackets. It eliminates situations where between two | there", "expression contains correct signs and is semantically correct :param expression: String expression to", "Infix expression as a String :return: String infix expression evaluated using QuineMcCluskey \"\"\"", "\"_\" if lz == 1: return w return False def reduce_(s): \"\"\" Main", "program. 
It calls functions to check if expression is correct and then reduces", "this case they are a product of QuineMcCluskey algorithm :return: String containing reduced", "if len(expression_with_xor) < len(expression): return expression_with_xor e = reduce_brackets(expression_in_general_form) if len(e) < len(expression):", "for single in expression: if state: if single in self.operators and self.operators[single][1] ==", "is_associative(tkn, associativity_type): if tkn == '>' and associativity_type == 'r': # because only", "feel really bad for this reduced_sub_expression = reduce_tuple(expr) prev_expression = str.join('|', expr) if", "return reduce_brackets(expression) class Expression: \"\"\" Class designed to handle most of expression operations.", "want ~ # we ignore brackets since they are already checked continue elif", "\"\"\" if n == 0: yield \"\" else: for c in generate_binary(n -", ":return: String infix expression evaluated using QuineMcCluskey \"\"\" if not expression: expression =", "return str.join('|', expression_list) return '(' + functools.reduce(lambda x, y: x + '^' +", "the input one if further reduction was not successful \"\"\" expression_list = list(expression)", "variables.append(variable) return variables def calculate_onp(expression, values): \"\"\" Function calculates a value of an", "expression.split('|') n = len(expressions_list) for a in range(2, n + 1): for expr", "in list(expr): del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda x, y: '|' + x +", "'_' \"\"\" w = \"\" lz = 0 for z1, z2 in zip(s1,", "')': while len(stack) > 0 and stack[-1] != '(': onp.append(stack.pop()) stack.pop() else: onp.append(tkn)", "QuineMcCluskey algorithm :return: String containing reduced expression or the input one if further", "designed to handle most of expression operations. 
It contains map with bindings: <operator>", "x, y: bool(x) and bool(y), '|': lambda x, y: bool(x) or bool(y), '/':", "if calculate_onp(current_expression, x): correct_binaries.append(x) except: break set2 = reduce_(correct_binaries) self.general_form = expression_to_string(set2) return", "if __name__ == '__main__': x = None while not x: x = input('')", "matches a pattern: (expression) and trims brackets :return: expression with trimmed brackets \"\"\"", "Function calculates a value of an expression in reverse polish notation :param expression:", "+ x + y + '|', expressions_list)) return expression def reduce_brackets(expression): \"\"\" Function", "\"\"\" Basic expression trimming :param expression: takes an expression which in most cases", "String form :return: Bool result of brackets checking \"\"\" if not expression: expression", "brackets \"\"\" e = Expression('') while len(expression) > 2 and expression[0] == '('", "list(zip(get_variables(expression), list(values))) expression = list(map(lambda x: replace_mapping(zipped_list, x), expression)) operators = {'^': lambda", "this place is being replaced by '_' \"\"\" w = \"\" lz =", "\"\"\" Helper function to change a reduced set to human-readable form :param s:", "a == ')': brackets -= 1 if brackets < 0: return False if", "\"\"\" Function calculates a value of an expression in reverse polish notation :param", "to reduce xor expressions. 
It generates combinations of k elements in len(variables) where", "['(', ')']: continue else: return False return not state def check_expression(self, expression=''): \"\"\"", "# we want ~ # we ignore brackets since they are already checked", "expression[0] == '~': top = not bool(stack.pop()) stack.append(top) else: e1 = int(stack.pop()) e2", "if brackets == 0: return True return False def check_if_signs_are_correct(self, expression=''): \"\"\" Simple", "self.check_if_brackets_are_correct(expression) def convert_to_onp(self, expression=''): \"\"\" Function converts an infix expression to RPN Warning:", "= Expression('') while len(expression) > 2 and expression[0] == '(' and expression[-1] ==", "expression: String expression to be reduced :return: reduced expression or ERROR if it", "they are already checked continue elif single in (ascii_lowercase + 'TF'): state =", "expression: takes an expression which in most cases matches a pattern: (expression) and", "\"\"\" expressions_list = expression.split('|') n = len(expressions_list) for a in range(2, n +", "we want 1 variable (or variable + negation) reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some) return str.join('|',", "+ 'TF'): return find_value(zipped_list, x) else: return x def get_variables(expression): \"\"\" Functions filters", "Main function that is responsible for driving program. It calls functions to check", "an expression Warning: function will only work on correct RNP expression and will", "self.expression if not expression: return True if [x for x in expression if", "expression or the input one if further reduction was not successful \"\"\" expression_list", "pattern: (expression) and trims brackets :return: expression with trimmed brackets \"\"\" e =", "case of > it matters. 
return False return True def concat(s1, s2): \"\"\"", "them As a variable we mean any lower case character :param expression: expression", "return False else: if single in self.operators and self.operators[single][1] == 2: # everything", "return reduce_xor(functools.reduce(lambda x, y: '|' + x + y + '|', expressions_list)) return", "eliminates situations where between two | there is a expression that doesnt need", "s: Set with values :return: reduced set \"\"\" result = set() b2 =", "stack = [] onp = [] for tkn in expression: if tkn in", "bool(y), '|': lambda x, y: bool(x) or bool(y), '/': lambda x, y: not", "binary sequence \"\"\" if n == 0: yield \"\" else: for c in", "')']: # we want ~ # we ignore brackets since they are already", "expression)) operators = {'^': lambda x, y: bool(x) ^ bool(y), '&': lambda x,", "filter function that checks if expression contains correct signs and is semantically correct", "i in range(0, len(e1)): if e1[i] == '_': continue if e1[i] == '0':", "'/': (2, 2), '>': (1, 2)} # <operator> -> (priority,arguments_number) def check_if_brackets_are_correct(self, expression=''):", "with variables from expression \"\"\" variables = [] for variable in expression: if", "that reduces unessesary brackets. It eliminates situations where between two | there is", "ascii_lowercase and variable not in variables: variables.append(variable) return variables def calculate_onp(expression, values): \"\"\"", "matters. 
return False return True def concat(s1, s2): \"\"\" Helper function to reduce", "reduce_brackets(expression_in_general_form) if len(e) < len(expression): return e return reduce_brackets(expression) class Expression: \"\"\" Class", "a expression that doesnt need them example: (expr1)|(a)|(expr2) will be evaluated to: (expr1)|a|(expr2)", "# we are sure that there will be 2 brackets + we want", "elif single in (ascii_lowercase + 'TF'): state = False else: return False else:", "> 0: if expression[0] in ['0', '1']: stack.append(int(expression[0])) else: if expression[0] == '~':", "-1 def replace_mapping(zipped_list, x): if x == 'T': return 1 elif x ==", "not expression: expression = self.expression return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression) def convert_to_onp(self, expression=''): \"\"\"", "it matches a pattern: (expr1)|(expr2)|(expr3) ... :return: reduced expression in string form or", "reverse polish notation :param expression: Expression in RPN given as a string. :param", "by '_' \"\"\" w = \"\" lz = 0 for z1, z2 in", "reduced expression or ERROR if it is not correct \"\"\" expression_object = Expression(expression)", "Specific function to reduce xor expressions. 
It generates combinations of k elements in", "= binary_generator.__next__() if calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x): incorrect_binaries.append(x) except: break if len(incorrect_binaries)", "get_variables(str.join('|', expression_list)) binary_generator = generate_binary(len(variables)) incorrect_binaries = [] some_expression = Expression('') onp_expression =", "signs :param expression: String expression to check :return: Bool result \"\"\" if not", "0: yield \"\" else: for c in generate_binary(n - 1): yield \"0\" +", "in expression_list: if len(some) <= 4: # we are sure that there will", "return find_value(zipped_list, x) else: return x def get_variables(expression): \"\"\" Functions filters the expression", "in (ascii_lowercase + 'TF'): return find_value(zipped_list, x) else: return x def get_variables(expression): \"\"\"", "else: return x def get_variables(expression): \"\"\" Functions filters the expression for variables and", "if expression[0] == '~': top = not bool(stack.pop()) stack.append(top) else: e1 = int(stack.pop())", "RPN expression \"\"\" if not expression: expression = self.expression stack = [] onp", "e2) if v: result.add(v) b1 = b2 = True if not b1: result.add(e1)", "it matters. 
return False return True def concat(s1, s2): \"\"\" Helper function to", "def calculate_onp(expression, values): \"\"\" Function calculates a value of an expression in reverse", "len(some) <= 4: # we are sure that there will be 2 brackets", "if not expression: expression = self.expression n = len(get_variables(expression)) correct_binaries = [] generator", "else than ~ state = True elif single in ['(', ')']: continue else:", "+= 1 w += \"_\" if lz == 1: return w return False", "'F': return 0 elif x in (ascii_lowercase + 'TF'): return find_value(zipped_list, x) else:", "z2 in zip(s1, s2): if z1 == z2: w += z1 else: lz", "= self.expression stack = [] onp = [] for tkn in expression: if", "possible \"\"\" expressions_list = expression.split('|') n = len(expressions_list) for a in range(2, n", "b2 = True if not b1: result.add(e1) if b2: return reduce_(result) return result", "c in generate_binary(n - 1): yield \"0\" + c yield \"1\" + c", "else: reduced_expressions.append(some) return str.join('|', reduced_expressions) def reduce_logical_expression(expression): \"\"\" Main function that is responsible", "in expression: if tkn in self.operators: while len(stack) > 0 and stack[-1] in", "+ \"&\" result2 += '(' + result[:-1] + ')|' if result2 == '()|':", "brackets since they are already checked continue elif single in (ascii_lowercase + 'TF'):", "< 0: return False if brackets == 0: return True return False def", "['(', ')']: # we want ~ # we ignore brackets since they are", "continue break stack.append(tkn) elif tkn == '(': stack.append(tkn) elif tkn == ')': while", "+ ')|' if result2 == '()|': return 'T' return result2[:-1] def trim_expression(expression): \"\"\"", "QuineMcCluskey \"\"\" if not expression: expression = self.expression n = len(get_variables(expression)) correct_binaries =", "String expression to be reduced :return: reduced expression or ERROR if it is", "reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor) < len(expression): 
return expression_with_xor e = reduce_brackets(expression_in_general_form) if len(e) <", "1 or single in ['(', ')']: # we want ~ # we ignore", "= expression_to_string(set2) return self.general_form if __name__ == '__main__': x = None while not", "not state def check_expression(self, expression=''): \"\"\" Higher level interface for checking expression It", ":param s1: Sthing we can iterate over with binary sequence and '_' :param", "= 0 for a in expression: if a == '(': brackets += 1", "result2 == '()|': return 'T' return result2[:-1] def trim_expression(expression): \"\"\" Basic expression trimming", "len(variables) where k is in range from 2 to len(variables). It checks whether", "expression It uses QuineMcCluskey algorithm Result matches a pattern: (expression1)|(expression2)|(expression3)... :param expression: Infix", "expression = list(map(lambda x: replace_mapping(zipped_list, x), expression)) operators = {'^': lambda x, y:", "values :return: String made from input in pattern: (expression)|(expression)|(expression) or T (if expression", "in most cases matches a pattern: (expression) and trims brackets :return: expression with", "^ bool(y), '&': lambda x, y: bool(x) and bool(y), '|': lambda x, y:", "determine whether brackets are placed correctly :param expression: expression in String form :return:", "reduced_expressions) def reduce_logical_expression(expression): \"\"\" Main function that is responsible for driving program. It", "(expr1)|(expr2)|(expr3) ... 
:return: reduced expression in string form or input one if further", "state def check_expression(self, expression=''): \"\"\" Higher level interface for checking expression It calls", "not possible \"\"\" expressions_list = expression.split('|') n = len(expressions_list) for a in range(2,", "different this place is being replaced by '_' \"\"\" w = \"\" lz", "< len(expression): return expression_with_xor e = reduce_brackets(expression_in_general_form) if len(e) < len(expression): return e", "return x def get_variables(expression): \"\"\" Functions filters the expression for variables and returns", "result2 = \"\" for e1 in s: result = \"\" for i in", "+ result[:-1] + ')|' if result2 == '()|': return 'T' return result2[:-1] def", "expression=''): \"\"\" Function converts an infix expression to RPN Warning: it doesnt check", "was not possible \"\"\" expressions_list = expression.split('|') n = len(expressions_list) for a in", "expression is correct semantically, in terms of brackets and signs :param expression: String", "and e.check_expression(expression): expression = expression[1:-1] return expression def reduce_tuple(expression): \"\"\" Function reduces a", "expression_list = list(expression) variables = get_variables(str.join('|', expression_list)) binary_generator = generate_binary(len(variables)) incorrect_binaries = []", "as var1 xor var2 xor var3 etc :param expression: String expression to be", "import ascii_lowercase import functools from itertools import combinations def generate_binary(n): \"\"\" Function returns", "general form from infix expression It uses QuineMcCluskey algorithm Result matches a pattern:", "def generate_binary(n): \"\"\" Function returns generator with binary sequences of a set length", "reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some) return str.join('|', reduced_expressions) def reduce_logical_expression(expression): \"\"\" Main function that is", "not expression: return True if [x for x in 
expression if x not", "in ['(', ')']: continue else: return False return not state def check_expression(self, expression=''):", "variables)) while True: try: x = binary_generator.__next__() if calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x):", "x == 'F': return 0 elif x in (ascii_lowercase + 'TF'): return find_value(zipped_list,", "y, variables) + ')' def reduce_xor(expression): \"\"\" Specific function to reduce xor expressions.", "given as a string. :param values: binary sequence with values to be put", "Bool result \"\"\" if not expression: expression = self.expression return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression)", "== 1: return trim_expression(expression_list[0]) reduced_expressions = [] for some in expression_list: if len(some)", "in case of > it matters. return False return True def concat(s1, s2):", "== 2: # everything else than ~ state = True elif single in", "e2 in s: v = concat(e1, e2) if v: result.add(v) b1 = b2", "or bool(y)} stack = [] while len(expression) > 0: if expression[0] in ['0',", "'|': (2, 2), '/': (2, 2), '>': (1, 2)} # <operator> -> (priority,arguments_number)", "brackets -= 1 if brackets < 0: return False if brackets == 0:", "\"\"\" Simple filter function that checks if expression contains correct signs and is", "len(e) < len(expression): return e return reduce_brackets(expression) class Expression: \"\"\" Class designed to", "= self.expression return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression) def convert_to_onp(self, expression=''): \"\"\" Function converts an", "1 elif a == ')': brackets -= 1 if brackets < 0: return", "level interface for checking expression It calls methods to determine whether expression is", "function to determine whether brackets are placed correctly :param expression: expression in String", "notation :param expression: Expression in RPN given as a string. 
:param values: binary", "== '0': result += '~' result += ascii_lowercase[i] + \"&\" result2 += '('", "= some_expression.convert_to_onp(functools.reduce(lambda x, y: x + '^' + y, variables)) while True: try:", "ascii_lowercase[i] + \"&\" result2 += '(' + result[:-1] + ')|' if result2 ==", "+ negation) reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some) return str.join('|', reduced_expressions) def reduce_logical_expression(expression): \"\"\" Main function", "= [] onp = [] for tkn in expression: if tkn in self.operators:", "= int(stack.pop()) stack.append(operators[expression[0]](e2, e1)) del expression[0] return stack[0] def is_associative(tkn, associativity_type): if tkn", "Bool result of brackets checking \"\"\" if not expression: expression = self.expression brackets", "\"\"\" Helper function to determine whether brackets are placed correctly :param expression: expression", "checked :return: Bool result \"\"\" if not expression: expression = self.expression if not", "be evaluated to: (expr1)|a|(expr2) :param expression: string expression in form (expr1)|(expr2)|(expr3) :return: reduced", "and '_' :param s2: Sthing we can iterate over with binary sequence and", "of > it matters. 
return False return True def concat(s1, s2): \"\"\" Helper", "if n == 0: yield \"\" else: for c in generate_binary(n - 1):", "and signs :param expression: String expression to check :return: Bool result \"\"\" if", "True for single in expression: if state: if single in self.operators and self.operators[single][1]", "False for e1 in s: b1 = False for e2 in s: v", "x = generator.__next__() if calculate_onp(current_expression, x): correct_binaries.append(x) except: break set2 = reduce_(correct_binaries) self.general_form", "[] for tkn in expression: if tkn in self.operators: while len(stack) > 0", "made from input in pattern: (expression)|(expression)|(expression) or T (if expression is tautology) \"\"\"", "= reduce_(correct_binaries) self.general_form = expression_to_string(set2) return self.general_form if __name__ == '__main__': x =", "is tautology) \"\"\" result2 = \"\" for e1 in s: result = \"\"", "of string expressions :param expression: tuple containing expressions. We assume that they do", "== x: return b return -1 def replace_mapping(zipped_list, x): if x == 'T':", "with values to be put in coresponding positions. 
Also string :return: Bool value", "elif x in (ascii_lowercase + 'TF'): return find_value(zipped_list, x) else: return x def", "result \"\"\" if not expression: expression = self.expression return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression) def", "form (expr1)|(expr2)|(expr3) :return: reduced expression \"\"\" expression_list = expression.split('|') if len(expression_list) == 1:", ":return: Bool result \"\"\" if not expression: expression = self.expression return self.check_if_signs_are_correct(expression) and", "expr) if len(reduced_sub_expression) < len(prev_expression): for var in list(expr): del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return", "function to change a reduced set to human-readable form :param s: Set with", "expression: Expression in RPN given as a string. :param values: binary sequence with", "+ ')' def reduce_xor(expression): \"\"\" Specific function to reduce xor expressions. 
It generates", "+ y, variables) + ')' def reduce_xor(expression): \"\"\" Specific function to reduce xor", "pattern: (expression)|(expression)|(expression) or T (if expression is tautology) \"\"\" result2 = \"\" for", "String made from input in pattern: (expression)|(expression)|(expression) or T (if expression is tautology)", "int(stack.pop()) e2 = int(stack.pop()) stack.append(operators[expression[0]](e2, e1)) del expression[0] return stack[0] def is_associative(tkn, associativity_type):", "trim_expression(expression_list[0]) reduced_expressions = [] for some in expression_list: if len(some) <= 4: #", "in form (expr1)|(expr2)|(expr3) :return: reduced expression \"\"\" expression_list = expression.split('|') if len(expression_list) ==", "reduce expressions :param s1: Sthing we can iterate over with binary sequence and", "y: bool(x) or bool(y), '/': lambda x, y: not (bool(x) and bool(y)), '>':", "x) else: return x def get_variables(expression): \"\"\" Functions filters the expression for variables", "== '>' and associativity_type == 'r': # because only in case of >", "= expression_object.generate_general_form() expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor) < len(expression): return expression_with_xor e =", "brackets = 0 for a in expression: if a == '(': brackets +=", "As a variable we mean any lower case character :param expression: expression to", "brackets are placed correctly :param expression: expression in String form :return: Bool result", "since in this case they are a product of QuineMcCluskey algorithm :return: String", "replace_mapping(zipped_list, x), expression)) operators = {'^': lambda x, y: bool(x) ^ bool(y), '&':", "lz = 0 for z1, z2 in zip(s1, s2): if z1 == z2:", "is correct :param expression: Infix expression :return: RPN expression \"\"\" if not expression:", "len(e1)): if e1[i] == '_': continue if e1[i] == '0': result += '~'", "== '(' and expression[-1] == ')' and 
e.check_expression(expression): expression = expression[1:-1] return expression", "return not state def check_expression(self, expression=''): \"\"\" Higher level interface for checking expression", "return stack[0] def is_associative(tkn, associativity_type): if tkn == '>' and associativity_type == 'r':", "matches a pattern: (expr1)|(expr2)|(expr3) ... :return: reduced expression in string form or input", "Simple filter function that checks if expression contains correct signs and is semantically", "else: for c in generate_binary(n - 1): yield \"0\" + c yield \"1\"", "> it matters. return False return True def concat(s1, s2): \"\"\" Helper function", "elements in len(variables) where k is in range from 2 to len(variables). It", "'ERROR' expression_in_general_form = expression_object.generate_general_form() expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor) < len(expression): return expression_with_xor", "while True: try: x = binary_generator.__next__() if calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x): incorrect_binaries.append(x)", "in RPN given as a string. :param values: binary sequence with values to", "return 'ERROR' expression_in_general_form = expression_object.generate_general_form() expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor) < len(expression): return", "'>': lambda x, y: not bool(x) or bool(y)} stack = [] while len(expression)", "\"\"\" Main function that is responsible for driving program. 
It calls functions to", "on correct RNP expression and will not return any warnings in case of", "generates combinations of k elements in len(variables) where k is in range from", "variable not in variables: variables.append(variable) return variables def calculate_onp(expression, values): \"\"\" Function calculates", "= \"\" for e1 in s: result = \"\" for i in range(0,", "+ c yield \"1\" + c def find_value(zipped_list, x): for a, b in", ":return: reduced expression \"\"\" expression_list = expression.split('|') if len(expression_list) == 1: return trim_expression(expression_list[0])", "onp.append(stack.pop()) return functools.reduce(lambda x, y: x + y, onp) def generate_general_form(self, expression=''): \"\"\"", "length :param n: length of a binary sequence :return: generator with binary sequence", "for tkn in expression: if tkn in self.operators: while len(stack) > 0 and", "\"\" for i in range(0, len(e1)): if e1[i] == '_': continue if e1[i]", "'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0): onp.append(stack.pop()) continue break stack.append(tkn) elif tkn", "a pattern: (expression1)|(expression2)|(expression3)... 
:param expression: Infix expression as a String :return: String infix", "in zipped_list: if a == x: return b return -1 def replace_mapping(zipped_list, x):", "or T (if expression is tautology) \"\"\" result2 = \"\" for e1 in", "Infix expression :return: RPN expression \"\"\" if not expression: expression = self.expression stack", "expression_to_string(set2) return self.general_form if __name__ == '__main__': x = None while not x:", "set to human-readable form :param s: Set with values :return: String made from", "binary sequence and '_' :return: Merged version of input, when certain bits are", "there is a expression that doesnt need them example: (expr1)|(a)|(expr2) will be evaluated", "= \"\" for i in range(0, len(e1)): if e1[i] == '_': continue if", "checks whether it is not the same as var1 xor var2 xor var3", "of a binary sequence :return: generator with binary sequence \"\"\" if n ==", "b1: result.add(e1) if b2: return reduce_(result) return result def expression_to_string(s): \"\"\" Helper function", "in self.operators and self.operators[single][1] == 1 or single in ['(', ')']: # we", "brackets and signs :param expression: String expression to check :return: Bool result \"\"\"", "return False def reduce_(s): \"\"\" Main reduce function :param s: Set with values", "sequence \"\"\" if n == 0: yield \"\" else: for c in generate_binary(n", "expression: expression = self.expression brackets = 0 for a in expression: if a", "be checked :return: Bool result \"\"\" if not expression: expression = self.expression if", "converts an infix expression to RPN Warning: it doesnt check whether this expression", "map with bindings: <operator> -> (priority,arguments_number) Also string with correct signs and expression", "def find_value(zipped_list, x): for a, b in zipped_list: if a == x: return", "bits are different this place is being replaced by '_' \"\"\" w =", "if single in self.operators and self.operators[single][1] == 1 or single in ['(', ')']:", "def 
get_variables(expression): \"\"\" Functions filters the expression for variables and returns them As", "containing expressions. We assume that they do not contain '|' since in this", "'TF'): state = False else: return False else: if single in self.operators and", "Sthing we can iterate over with binary sequence and '_' :return: Merged version", "to check :return: Bool result \"\"\" if not expression: expression = self.expression return", "')']: continue else: return False return not state def check_expression(self, expression=''): \"\"\" Higher", "from 2 to len(variables). It checks whether it is not the same as", "\"\"\" Function generates general form from infix expression It uses QuineMcCluskey algorithm Result", "that checks if expression contains correct signs and is semantically correct :param expression:", "[] for some in expression_list: if len(some) <= 4: # we are sure", "z2: w += z1 else: lz += 1 w += \"_\" if lz", "takes an expression which in most cases matches a pattern: (expression) and trims", "some_expression = Expression('') onp_expression = some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y: x", "x, y: x + '^' + y, variables)) while True: try: x =", "zip(s1, s2): if z1 == z2: w += z1 else: lz += 1", "return w return False def reduce_(s): \"\"\" Main reduce function :param s: Set", "'|': lambda x, y: bool(x) or bool(y), '/': lambda x, y: not (bool(x)", "if v: result.add(v) b1 = b2 = True if not b1: result.add(e1) if", "where k is in range from 2 to len(variables). 
It checks whether it", "version of input, when certain bits are different this place is being replaced", "list(values))) expression = list(map(lambda x: replace_mapping(zipped_list, x), expression)) operators = {'^': lambda x,", "'^' + y, variables) + ')' def reduce_xor(expression): \"\"\" Specific function to reduce", "signs and expression itself \"\"\" def __init__(self, expression): self.general_form = '' self.correctSigns =", "while len(expression) > 2 and expression[0] == '(' and expression[-1] == ')' and", "\"\"\" Function converts an infix expression to RPN Warning: it doesnt check whether", "brackets :return: expression with trimmed brackets \"\"\" e = Expression('') while len(expression) >", "further reduction was not possible \"\"\" expressions_list = expression.split('|') n = len(expressions_list) for", "e1[i] == '_': continue if e1[i] == '0': result += '~' result +=", "\"\" else: for c in generate_binary(n - 1): yield \"0\" + c yield", "in zip(s1, s2): if z1 == z2: w += z1 else: lz +=", "any lower case character :param expression: expression to search in :return: list with", "return self.general_form if __name__ == '__main__': x = None while not x: x", "self.general_form if __name__ == '__main__': x = None while not x: x =", "trimming :param expression: takes an expression which in most cases matches a pattern:", "e = reduce_brackets(expression_in_general_form) if len(e) < len(expression): return e return reduce_brackets(expression) class Expression:", "set length :param n: length of a binary sequence :return: generator with binary", ":return: reduced set \"\"\" result = set() b2 = False for e1 in", "if a == x: return b return -1 def replace_mapping(zipped_list, x): if x", "onp = [] for tkn in expression: if tkn in self.operators: while len(stack)", "0): onp.append(stack.pop()) continue break stack.append(tkn) elif tkn == '(': stack.append(tkn) elif tkn ==", "not (bool(x) and bool(y)), '>': lambda x, y: not bool(x) or bool(y)} stack", 
"expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda x, y: '|' + x + y + '|',", "checking \"\"\" if not expression: expression = self.expression brackets = 0 for a", "and stack[-1] in self.operators: if (is_associative(tkn, 'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <= 0)", "value of an expression in reverse polish notation :param expression: Expression in RPN", "n == 0: yield \"\" else: for c in generate_binary(n - 1): yield", "a == '(': brackets += 1 elif a == ')': brackets -= 1", "= {'^': lambda x, y: bool(x) ^ bool(y), '&': lambda x, y: bool(x)", "tkn == ')': while len(stack) > 0 and stack[-1] != '(': onp.append(stack.pop()) stack.pop()", "v = concat(e1, e2) if v: result.add(v) b1 = b2 = True if", "in range from 2 to len(variables). It checks whether it is not the", "calls functions to check if expression is correct and then reduces expression :param", "methods to determine whether expression is correct semantically, in terms of brackets and", "x): incorrect_binaries.append(x) except: break if len(incorrect_binaries) > 0: return str.join('|', expression_list) return '('", "lambda x, y: not bool(x) or bool(y)} stack = [] while len(expression) >", "of a set length :param n: length of a binary sequence :return: generator", "2: # everything else than ~ state = True elif single in ['(',", "y, onp) def generate_general_form(self, expression=''): \"\"\" Function generates general form from infix expression", "further reduction was not successful \"\"\" expression_list = list(expression) variables = get_variables(str.join('|', expression_list))", "if len(reduced_sub_expression) < len(prev_expression): for var in list(expr): del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda", "tkn in expression: if tkn in self.operators: while len(stack) > 0 and stack[-1]", "in expression if x 
not in self.correctSigns]: return False state = True for", "trimmed brackets \"\"\" e = Expression('') while len(expression) > 2 and expression[0] ==", "e1[i] == '0': result += '~' result += ascii_lowercase[i] + \"&\" result2 +=", "generator with binary sequence \"\"\" if n == 0: yield \"\" else: for", "zipped_list: if a == x: return b return -1 def replace_mapping(zipped_list, x): if", "onp.append(tkn) while len(stack) > 0: onp.append(stack.pop()) return functools.reduce(lambda x, y: x + y,", "replace_mapping(zipped_list, x): if x == 'T': return 1 elif x == 'F': return", "'|', expressions_list)) return expression def reduce_brackets(expression): \"\"\" Function that reduces unessesary brackets. It", "expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor) < len(expression): return expression_with_xor e = reduce_brackets(expression_in_general_form) if", "[] for variable in expression: if variable in ascii_lowercase and variable not in", "expression to RPN Warning: it doesnt check whether this expression is correct :param", "in terms of brackets and signs :param expression: String expression to check :return:", "else: onp.append(tkn) while len(stack) > 0: onp.append(stack.pop()) return functools.reduce(lambda x, y: x +", "return reduce_(result) return result def expression_to_string(s): \"\"\" Helper function to change a reduced", "or input one if further reduction was not possible \"\"\" expressions_list = expression.split('|')", ":return: reduced expression or ERROR if it is not correct \"\"\" expression_object =", "= False for e1 in s: b1 = False for e2 in s:", "expression = self.expression brackets = 0 for a in expression: if a ==", "self.expression = expression.replace(' ', '') self.operators = {'~': (4, 1), '^': (3, 2),", "1 if brackets < 0: return False if brackets == 0: return True", "combinations(expressions_list, a): # i feel really bad for this reduced_sub_expression = reduce_tuple(expr) prev_expression", 
"+= '~' result += ascii_lowercase[i] + \"&\" result2 += '(' + result[:-1] +", "single in self.operators and self.operators[single][1] == 1 or single in ['(', ')']: #", "expression evaluated using QuineMcCluskey \"\"\" if not expression: expression = self.expression n =", "return True def concat(s1, s2): \"\"\" Helper function to reduce expressions :param s1:", "'^' + y, variables)) while True: try: x = binary_generator.__next__() if calculate_onp(onp_expression, x)", "if tkn in self.operators: while len(stack) > 0 and stack[-1] in self.operators: if", "expression = expression[1:-1] return expression def reduce_tuple(expression): \"\"\" Function reduces a tuple of", "state = False else: return False else: if single in self.operators and self.operators[single][1]", "if expression is correct and then reduces expression :param expression: String expression to", "or the input one if further reduction was not successful \"\"\" expression_list =", "if len(e) < len(expression): return e return reduce_brackets(expression) class Expression: \"\"\" Class designed", "False def check_if_signs_are_correct(self, expression=''): \"\"\" Simple filter function that checks if expression contains", "generate_binary(len(variables)) incorrect_binaries = [] some_expression = Expression('') onp_expression = some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor =", "and then reduces expression :param expression: String expression to be reduced :return: reduced", "with binary sequence and '_' :param s2: Sthing we can iterate over with", "'' self.correctSigns = '~^&|/>()TF' + ascii_lowercase self.expression = expression.replace(' ', '') self.operators =", "for e2 in s: v = concat(e1, e2) if v: result.add(v) b1 =", "> 0 and stack[-1] in self.operators: if (is_associative(tkn, 'l') and (self.operators[tkn][0] - self.operators[stack[-1]][0])", ":param s: Set with values :return: String made from input in pattern: (expression)|(expression)|(expression)", "\"\"\" Functions 
filters the expression for variables and returns them As a variable", "(4, 1), '^': (3, 2), '&': (2, 2), '|': (2, 2), '/': (2,", "using QuineMcCluskey \"\"\" if not expression: expression = self.expression n = len(get_variables(expression)) correct_binaries", "to human-readable form :param s: Set with values :return: String made from input", "x, y: not (bool(x) and bool(y)), '>': lambda x, y: not bool(x) or", "to determine whether expression is correct semantically, in terms of brackets and signs", "= [] for some in expression_list: if len(some) <= 4: # we are", "try: x = binary_generator.__next__() if calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x): incorrect_binaries.append(x) except: break", "(expr1)|(a)|(expr2) will be evaluated to: (expr1)|a|(expr2) :param expression: string expression in form (expr1)|(expr2)|(expr3)", "change a reduced set to human-readable form :param s: Set with values :return:", "c yield \"1\" + c def find_value(zipped_list, x): for a, b in zipped_list:", "since they are already checked continue elif single in (ascii_lowercase + 'TF'): state", "expression_list)) binary_generator = generate_binary(len(variables)) incorrect_binaries = [] some_expression = Expression('') onp_expression = some_expression.convert_to_onp(str.join('|',", "if calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x): incorrect_binaries.append(x) except: break if len(incorrect_binaries) > 0:", "True: try: x = binary_generator.__next__() if calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x): incorrect_binaries.append(x) except:", "correct and then reduces expression :param expression: String expression to be reduced :return:", ":return: Bool value of an expression Warning: function will only work on correct", "and self.operators[single][1] == 1 or single in ['(', ')']: # we want ~", "= b2 = True if not b1: result.add(e1) if b2: return reduce_(result) return", "and will not return any warnings in case of errors \"\"\" zipped_list =", 
"elif x == 'F': return 0 elif x in (ascii_lowercase + 'TF'): return", "expression_with_xor e = reduce_brackets(expression_in_general_form) if len(e) < len(expression): return e return reduce_brackets(expression) class", "a == x: return b return -1 def replace_mapping(zipped_list, x): if x ==", "def check_if_brackets_are_correct(self, expression=''): \"\"\" Helper function to determine whether brackets are placed correctly", "and bool(y), '|': lambda x, y: bool(x) or bool(y), '/': lambda x, y:", "expression \"\"\" if not expression: expression = self.expression stack = [] onp =", "y: '|' + x + y + '|', expressions_list)) return expression def reduce_brackets(expression):", "1 elif x == 'F': return 0 elif x in (ascii_lowercase + 'TF'):", "1 variable (or variable + negation) reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some) return str.join('|', reduced_expressions) def", "bad for this reduced_sub_expression = reduce_tuple(expr) prev_expression = str.join('|', expr) if len(reduced_sub_expression) <", "Function converts an infix expression to RPN Warning: it doesnt check whether this", ":param expression: tuple containing expressions. 
We assume that they do not contain '|'", "want 1 variable (or variable + negation) reduced_expressions.append(trim_expression(some)) else: reduced_expressions.append(some) return str.join('|', reduced_expressions)", "string with correct signs and expression itself \"\"\" def __init__(self, expression): self.general_form =", "expression itself \"\"\" def __init__(self, expression): self.general_form = '' self.correctSigns = '~^&|/>()TF' +", "e1 = int(stack.pop()) e2 = int(stack.pop()) stack.append(operators[expression[0]](e2, e1)) del expression[0] return stack[0] def", "of errors \"\"\" zipped_list = list(zip(get_variables(expression), list(values))) expression = list(map(lambda x: replace_mapping(zipped_list, x),", "= [] generator = generate_binary(n) current_expression = self.convert_to_onp(expression) while True: try: x =", "positions. Also string :return: Bool value of an expression Warning: function will only", "an expression in reverse polish notation :param expression: Expression in RPN given as", "result.add(e1) if b2: return reduce_(result) return result def expression_to_string(s): \"\"\" Helper function to", "= list(zip(get_variables(expression), list(values))) expression = list(map(lambda x: replace_mapping(zipped_list, x), expression)) operators = {'^':", "... :return: reduced expression in string form or input one if further reduction", "a value of an expression in reverse polish notation :param expression: Expression in", "'r': # because only in case of > it matters. 
return False return", "if a == '(': brackets += 1 elif a == ')': brackets -=", "= reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor) < len(expression): return expression_with_xor e = reduce_brackets(expression_in_general_form) if len(e)", "expression: expression = self.expression return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression) def convert_to_onp(self, expression=''): \"\"\" Function", "and '_' :return: Merged version of input, when certain bits are different this", "False return not state def check_expression(self, expression=''): \"\"\" Higher level interface for checking", "in generate_binary(n - 1): yield \"0\" + c yield \"1\" + c def", "generates general form from infix expression It uses QuineMcCluskey algorithm Result matches a", "reduce xor expressions. It generates combinations of k elements in len(variables) where k", "['0', '1']: stack.append(int(expression[0])) else: if expression[0] == '~': top = not bool(stack.pop()) stack.append(top)", "= not bool(stack.pop()) stack.append(top) else: e1 = int(stack.pop()) e2 = int(stack.pop()) stack.append(operators[expression[0]](e2, e1))", "binary_generator = generate_binary(len(variables)) incorrect_binaries = [] some_expression = Expression('') onp_expression = some_expression.convert_to_onp(str.join('|', expression_list))", "expression[0] return stack[0] def is_associative(tkn, associativity_type): if tkn == '>' and associativity_type ==", "returns them As a variable we mean any lower case character :param expression:", "def reduce_brackets(expression): \"\"\" Function that reduces unessesary brackets. 
It eliminates situations where between", "if not expression_object.check_expression(): return 'ERROR' expression_in_general_form = expression_object.generate_general_form() expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor)", "associativity_type == 'r': # because only in case of > it matters. return", "= some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y: x + '^' + y,", "x + y + '|', expressions_list)) return expression def reduce_brackets(expression): \"\"\" Function that", "stack.append(operators[expression[0]](e2, e1)) del expression[0] return stack[0] def is_associative(tkn, associativity_type): if tkn == '>'", "onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y: x + '^' + y, variables)) while True:", "expression.replace(' ', '') self.operators = {'~': (4, 1), '^': (3, 2), '&': (2,", "return result def expression_to_string(s): \"\"\" Helper function to change a reduced set to", "continue elif single in (ascii_lowercase + 'TF'): state = False else: return False", "expression_list)) onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y: x + '^' + y, variables)) while", "y: x + y, onp) def generate_general_form(self, expression=''): \"\"\" Function generates general form", "Functions filters the expression for variables and returns them As a variable we", "if result2 == '()|': return 'T' return result2[:-1] def trim_expression(expression): \"\"\" Basic expression", "= reduce_brackets(expression_in_general_form) if len(e) < len(expression): return e return reduce_brackets(expression) class Expression: \"\"\"", "2), '|': (2, 2), '/': (2, 2), '>': (1, 2)} # <operator> ->", "bool(y)), '>': lambda x, y: not bool(x) or bool(y)} stack = [] while", "'1']: stack.append(int(expression[0])) else: if expression[0] == '~': top = not bool(stack.pop()) stack.append(top) else:", "e = Expression('') while 
len(expression) > 2 and expression[0] == '(' and expression[-1]", "It calls methods to determine whether expression is correct semantically, in terms of", "Sthing we can iterate over with binary sequence and '_' :param s2: Sthing", "xor var2 xor var3 etc :param expression: String expression to be reduced. We", "yield \"1\" + c def find_value(zipped_list, x): for a, b in zipped_list: if", "'T': return 1 elif x == 'F': return 0 elif x in (ascii_lowercase", "self.operators[stack[-1]][0]) <= 0) \\ or ( is_associative(tkn, 'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) <", "for variables and returns them As a variable we mean any lower case", "variables = get_variables(str.join('|', expression_list)) binary_generator = generate_binary(len(variables)) incorrect_binaries = [] some_expression = Expression('')", "variables def calculate_onp(expression, values): \"\"\" Function calculates a value of an expression in", "of an expression in reverse polish notation :param expression: Expression in RPN given", "Expression('') onp_expression = some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y: x + '^'", "== 'T': return 1 elif x == 'F': return 0 elif x in", "is responsible for driving program. 
It calls functions to check if expression is", "return b return -1 def replace_mapping(zipped_list, x): if x == 'T': return 1", "expression = self.expression n = len(get_variables(expression)) correct_binaries = [] generator = generate_binary(n) current_expression", "'_': continue if e1[i] == '0': result += '~' result += ascii_lowercase[i] +", "result = set() b2 = False for e1 in s: b1 = False", "it is not correct \"\"\" expression_object = Expression(expression) if not expression_object.check_expression(): return 'ERROR'", "= '~^&|/>()TF' + ascii_lowercase self.expression = expression.replace(' ', '') self.operators = {'~': (4,", "for a, b in zipped_list: if a == x: return b return -1", "n = len(expressions_list) for a in range(2, n + 1): for expr in", "def concat(s1, s2): \"\"\" Helper function to reduce expressions :param s1: Sthing we", "else: return False return not state def check_expression(self, expression=''): \"\"\" Higher level interface", "a product of QuineMcCluskey algorithm :return: String containing reduced expression or the input", "\"\"\" Function reduces a tuple of string expressions :param expression: tuple containing expressions.", "correct signs and is semantically correct :param expression: String expression to be checked", "reduction was not possible \"\"\" expressions_list = expression.split('|') n = len(expressions_list) for a", "stack[-1] != '(': onp.append(stack.pop()) stack.pop() else: onp.append(tkn) while len(stack) > 0: onp.append(stack.pop()) return", "product of QuineMcCluskey algorithm :return: String containing reduced expression or the input one", "not in variables: variables.append(variable) return variables def calculate_onp(expression, values): \"\"\" Function calculates a", "a string. :param values: binary sequence with values to be put in coresponding", "string expressions :param expression: tuple containing expressions. 
We assume that they do not", "one if further reduction was not possible \"\"\" expressions_list = expression.split('|') n =", "= expression.split('|') n = len(expressions_list) for a in range(2, n + 1): for", "reduces expression :param expression: String expression to be reduced :return: reduced expression or", "+= ascii_lowercase[i] + \"&\" result2 += '(' + result[:-1] + ')|' if result2", "It calls functions to check if expression is correct and then reduces expression", "calculate_onp(onp_xor, x): incorrect_binaries.append(x) except: break if len(incorrect_binaries) > 0: return str.join('|', expression_list) return", "doesnt check whether this expression is correct :param expression: Infix expression :return: RPN", "expression :return: RPN expression \"\"\" if not expression: expression = self.expression stack =", "for e1 in s: result = \"\" for i in range(0, len(e1)): if", ":param n: length of a binary sequence :return: generator with binary sequence \"\"\"", "x in expression if x not in self.correctSigns]: return False state = True", "evaluated to: (expr1)|a|(expr2) :param expression: string expression in form (expr1)|(expr2)|(expr3) :return: reduced expression", "from infix expression It uses QuineMcCluskey algorithm Result matches a pattern: (expression1)|(expression2)|(expression3)... 
:param", "def generate_general_form(self, expression=''): \"\"\" Function generates general form from infix expression It uses", "[] while len(expression) > 0: if expression[0] in ['0', '1']: stack.append(int(expression[0])) else: if", "\"\"\" Helper function to reduce expressions :param s1: Sthing we can iterate over", "'&': lambda x, y: bool(x) and bool(y), '|': lambda x, y: bool(x) or", "expression: String expression to be checked :return: Bool result \"\"\" if not expression:", "= concat(e1, e2) if v: result.add(v) b1 = b2 = True if not", "input in pattern: (expression)|(expression)|(expression) or T (if expression is tautology) \"\"\" result2 =", "etc :param expression: String expression to be reduced. We assume that it matches", "def check_if_signs_are_correct(self, expression=''): \"\"\" Simple filter function that checks if expression contains correct", "== '~': top = not bool(stack.pop()) stack.append(top) else: e1 = int(stack.pop()) e2 =", "calculate_onp(expression, values): \"\"\" Function calculates a value of an expression in reverse polish", "while len(stack) > 0: onp.append(stack.pop()) return functools.reduce(lambda x, y: x + y, onp)", "( is_associative(tkn, 'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0): onp.append(stack.pop()) continue break stack.append(tkn)", "= [] for tkn in expression: if tkn in self.operators: while len(stack) >", "bool(x) ^ bool(y), '&': lambda x, y: bool(x) and bool(y), '|': lambda x,", "\"\"\" expression_list = list(expression) variables = get_variables(str.join('|', expression_list)) binary_generator = generate_binary(len(variables)) incorrect_binaries =", "checked continue elif single in (ascii_lowercase + 'TF'): state = False else: return", "(bool(x) and bool(y)), '>': lambda x, y: not bool(x) or bool(y)} stack =", "handle most of expression operations. 
It contains map with bindings: <operator> -> (priority,arguments_number)", "del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda x, y: '|' + x + y +", "if state: if single in self.operators and self.operators[single][1] == 1 or single in", "correct RNP expression and will not return any warnings in case of errors", "generate_general_form(self, expression=''): \"\"\" Function generates general form from infix expression It uses QuineMcCluskey", ":return: RPN expression \"\"\" if not expression: expression = self.expression stack = []", "replaced by '_' \"\"\" w = \"\" lz = 0 for z1, z2", "x): if x == 'T': return 1 elif x == 'F': return 0", "not expression: expression = self.expression n = len(get_variables(expression)) correct_binaries = [] generator =", "unessesary brackets. It eliminates situations where between two | there is a expression", "len(variables). It checks whether it is not the same as var1 xor var2", "coresponding positions. Also string :return: Bool value of an expression Warning: function will", "0 elif x in (ascii_lowercase + 'TF'): return find_value(zipped_list, x) else: return x", "correct \"\"\" expression_object = Expression(expression) if not expression_object.check_expression(): return 'ERROR' expression_in_general_form = expression_object.generate_general_form()", "RPN given as a string. :param values: binary sequence with values to be", "def reduce_xor(expression): \"\"\" Specific function to reduce xor expressions. 
It generates combinations of", "they are a product of QuineMcCluskey algorithm :return: String containing reduced expression or", "expression[-1] == ')' and e.check_expression(expression): expression = expression[1:-1] return expression def reduce_tuple(expression): \"\"\"", "= False for e2 in s: v = concat(e1, e2) if v: result.add(v)", "'>': (1, 2)} # <operator> -> (priority,arguments_number) def check_if_brackets_are_correct(self, expression=''): \"\"\" Helper function", "to be put in coresponding positions. Also string :return: Bool value of an", "n = len(get_variables(expression)) correct_binaries = [] generator = generate_binary(n) current_expression = self.convert_to_onp(expression) while", "situations where between two | there is a expression that doesnt need them", "x, y: bool(x) or bool(y), '/': lambda x, y: not (bool(x) and bool(y)),", "expression to be reduced. We assume that it matches a pattern: (expr1)|(expr2)|(expr3) ...", "== 'r': # because only in case of > it matters. return False", "if len(some) <= 4: # we are sure that there will be 2", "string expression in form (expr1)|(expr2)|(expr3) :return: reduced expression \"\"\" expression_list = expression.split('|') if", "expression_in_general_form = expression_object.generate_general_form() expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor) < len(expression): return expression_with_xor e", "are a product of QuineMcCluskey algorithm :return: String containing reduced expression or the", "case character :param expression: expression to search in :return: list with variables from", "int(stack.pop()) stack.append(operators[expression[0]](e2, e1)) del expression[0] return stack[0] def is_associative(tkn, associativity_type): if tkn ==", "binary_generator.__next__() if calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x): incorrect_binaries.append(x) except: break if len(incorrect_binaries) >", "False else: if single in self.operators and 
self.operators[single][1] == 2: # everything else", "return False return not state def check_expression(self, expression=''): \"\"\" Higher level interface for", "assume that it matches a pattern: (expr1)|(expr2)|(expr3) ... :return: reduced expression in string", "to check if expression is correct and then reduces expression :param expression: String", "String infix expression evaluated using QuineMcCluskey \"\"\" if not expression: expression = self.expression", "self.operators[single][1] == 1 or single in ['(', ')']: # we want ~ #", "0) \\ or ( is_associative(tkn, 'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0): onp.append(stack.pop())", "reduction was not successful \"\"\" expression_list = list(expression) variables = get_variables(str.join('|', expression_list)) binary_generator", "sequence and '_' :param s2: Sthing we can iterate over with binary sequence", "contains correct signs and is semantically correct :param expression: String expression to be", "Helper function to reduce expressions :param s1: Sthing we can iterate over with", ":return: Merged version of input, when certain bits are different this place is", "expression Warning: function will only work on correct RNP expression and will not", "yield \"\" else: for c in generate_binary(n - 1): yield \"0\" + c", "checking expression It calls methods to determine whether expression is correct semantically, in", "y: bool(x) and bool(y), '|': lambda x, y: bool(x) or bool(y), '/': lambda", "reduced :return: reduced expression or ERROR if it is not correct \"\"\" expression_object", "else: if expression[0] == '~': top = not bool(stack.pop()) stack.append(top) else: e1 =", "if not expression: expression = self.expression brackets = 0 for a in expression:", "single in ['(', ')']: # we want ~ # we ignore brackets since", "function will only work on correct RNP expression and will not return any", "if single in self.operators and self.operators[single][1] == 2: # everything else than ~", 
"in variables: variables.append(variable) return variables def calculate_onp(expression, values): \"\"\" Function calculates a value", "i feel really bad for this reduced_sub_expression = reduce_tuple(expr) prev_expression = str.join('|', expr)", "self.convert_to_onp(expression) while True: try: x = generator.__next__() if calculate_onp(current_expression, x): correct_binaries.append(x) except: break", "correct signs and expression itself \"\"\" def __init__(self, expression): self.general_form = '' self.correctSigns", "reduced expression or the input one if further reduction was not successful \"\"\"", "expression = self.expression stack = [] onp = [] for tkn in expression:", "def convert_to_onp(self, expression=''): \"\"\" Function converts an infix expression to RPN Warning: it", "trims brackets :return: expression with trimmed brackets \"\"\" e = Expression('') while len(expression)", "= reduce_tuple(expr) prev_expression = str.join('|', expr) if len(reduced_sub_expression) < len(prev_expression): for var in", "\"\"\" w = \"\" lz = 0 for z1, z2 in zip(s1, s2):", "we mean any lower case character :param expression: expression to search in :return:", "tkn == '>' and associativity_type == 'r': # because only in case of", "(ascii_lowercase + 'TF'): return find_value(zipped_list, x) else: return x def get_variables(expression): \"\"\" Functions", "expression: if state: if single in self.operators and self.operators[single][1] == 1 or single", "for e1 in s: b1 = False for e2 in s: v =", "value of an expression Warning: function will only work on correct RNP expression", "not bool(stack.pop()) stack.append(top) else: e1 = int(stack.pop()) e2 = int(stack.pop()) stack.append(operators[expression[0]](e2, e1)) del", "in this case they are a product of QuineMcCluskey algorithm :return: String containing", "with values :return: reduced set \"\"\" result = set() b2 = False for", "var1 xor var2 xor var3 etc :param expression: String expression to be reduced.", "')' and 
e.check_expression(expression): expression = expression[1:-1] return expression def reduce_tuple(expression): \"\"\" Function reduces", "<= 0) \\ or ( is_associative(tkn, 'r') and (self.operators[tkn][0] - self.operators[stack[-1]][0]) < 0):", "expression[0] in ['0', '1']: stack.append(int(expression[0])) else: if expression[0] == '~': top = not", "if z1 == z2: w += z1 else: lz += 1 w +=", "self.operators and self.operators[single][1] == 1 or single in ['(', ')']: # we want", "with bindings: <operator> -> (priority,arguments_number) Also string with correct signs and expression itself", "already checked continue elif single in (ascii_lowercase + 'TF'): state = False else:", "expression_list) return '(' + functools.reduce(lambda x, y: x + '^' + y, variables)", "lambda x, y: bool(x) and bool(y), '|': lambda x, y: bool(x) or bool(y),", "variables) + ')' def reduce_xor(expression): \"\"\" Specific function to reduce xor expressions. It", "We assume that they do not contain '|' since in this case they", "determine whether expression is correct semantically, in terms of brackets and signs :param", "are placed correctly :param expression: expression in String form :return: Bool result of", "matches a pattern: (expression1)|(expression2)|(expression3)... 
:param expression: Infix expression as a String :return: String", "(ascii_lowercase + 'TF'): state = False else: return False else: if single in", "== 1: return w return False def reduce_(s): \"\"\" Main reduce function :param", "will be evaluated to: (expr1)|a|(expr2) :param expression: string expression in form (expr1)|(expr2)|(expr3) :return:", "1: return w return False def reduce_(s): \"\"\" Main reduce function :param s:", "in pattern: (expression)|(expression)|(expression) or T (if expression is tautology) \"\"\" result2 = \"\"", "len(incorrect_binaries) > 0: return str.join('|', expression_list) return '(' + functools.reduce(lambda x, y: x", "\"\"\" result = set() b2 = False for e1 in s: b1 =", "expression): self.general_form = '' self.correctSigns = '~^&|/>()TF' + ascii_lowercase self.expression = expression.replace(' ',", "for driving program. It calls functions to check if expression is correct and", "expression in string form or input one if further reduction was not possible", "e.check_expression(expression): expression = expression[1:-1] return expression def reduce_tuple(expression): \"\"\" Function reduces a tuple", "form :return: Bool result of brackets checking \"\"\" if not expression: expression =", "\"\"\" zipped_list = list(zip(get_variables(expression), list(values))) expression = list(map(lambda x: replace_mapping(zipped_list, x), expression)) operators", "x = binary_generator.__next__() if calculate_onp(onp_expression, x) != calculate_onp(onp_xor, x): incorrect_binaries.append(x) except: break if", "Also string :return: Bool value of an expression Warning: function will only work", "expression in String form :return: Bool result of brackets checking \"\"\" if not", "expression: if tkn in self.operators: while len(stack) > 0 and stack[-1] in self.operators:", "self.operators[stack[-1]][0]) < 0): onp.append(stack.pop()) continue break stack.append(tkn) elif tkn == '(': stack.append(tkn) elif", "values to be put in coresponding positions. 
Also string :return: Bool value of", "really bad for this reduced_sub_expression = reduce_tuple(expr) prev_expression = str.join('|', expr) if len(reduced_sub_expression)", "containing reduced expression or the input one if further reduction was not successful", "self.correctSigns = '~^&|/>()TF' + ascii_lowercase self.expression = expression.replace(' ', '') self.operators = {'~':", "sure that there will be 2 brackets + we want 1 variable (or", "in self.operators: while len(stack) > 0 and stack[-1] in self.operators: if (is_associative(tkn, 'l')", "== ')': while len(stack) > 0 and stack[-1] != '(': onp.append(stack.pop()) stack.pop() else:", "algorithm :return: String containing reduced expression or the input one if further reduction", "of k elements in len(variables) where k is in range from 2 to", "to determine whether brackets are placed correctly :param expression: expression in String form", "stack.pop() else: onp.append(tkn) while len(stack) > 0: onp.append(stack.pop()) return functools.reduce(lambda x, y: x", "be 2 brackets + we want 1 variable (or variable + negation) reduced_expressions.append(trim_expression(some))", "'~' result += ascii_lowercase[i] + \"&\" result2 += '(' + result[:-1] + ')|'", "with values :return: String made from input in pattern: (expression)|(expression)|(expression) or T (if", "combinations def generate_binary(n): \"\"\" Function returns generator with binary sequences of a set", "reduce_(s): \"\"\" Main reduce function :param s: Set with values :return: reduced set", "for i in range(0, len(e1)): if e1[i] == '_': continue if e1[i] ==", "correct semantically, in terms of brackets and signs :param expression: String expression to", "+ c def find_value(zipped_list, x): for a, b in zipped_list: if a ==", "correct_binaries.append(x) except: break set2 = reduce_(correct_binaries) self.general_form = expression_to_string(set2) return self.general_form if __name__", "stack.append(tkn) elif tkn == '(': stack.append(tkn) elif tkn == ')': 
while len(stack) >", "of brackets and signs :param expression: String expression to check :return: Bool result", "functools.reduce(lambda x, y: x + '^' + y, variables) + ')' def reduce_xor(expression):", "2 brackets + we want 1 variable (or variable + negation) reduced_expressions.append(trim_expression(some)) else:", "= int(stack.pop()) e2 = int(stack.pop()) stack.append(operators[expression[0]](e2, e1)) del expression[0] return stack[0] def is_associative(tkn,", "str.join('|', expr) if len(reduced_sub_expression) < len(prev_expression): for var in list(expr): del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression)", "contain '|' since in this case they are a product of QuineMcCluskey algorithm", "than ~ state = True elif single in ['(', ')']: continue else: return", "some in expression_list: if len(some) <= 4: # we are sure that there", "for c in generate_binary(n - 1): yield \"0\" + c yield \"1\" +", "len(get_variables(expression)) correct_binaries = [] generator = generate_binary(n) current_expression = self.convert_to_onp(expression) while True: try:", "True if [x for x in expression if x not in self.correctSigns]: return", "and expression itself \"\"\" def __init__(self, expression): self.general_form = '' self.correctSigns = '~^&|/>()TF'", "expression_to_string(s): \"\"\" Helper function to change a reduced set to human-readable form :param", "self.general_form = '' self.correctSigns = '~^&|/>()TF' + ascii_lowercase self.expression = expression.replace(' ', '')", "expression is correct :param expression: Infix expression :return: RPN expression \"\"\" if not", "~ state = True elif single in ['(', ')']: continue else: return False", "sequence :return: generator with binary sequence \"\"\" if n == 0: yield \"\"", "cases matches a pattern: (expression) and trims brackets :return: expression with trimmed brackets", "string. 
:param values: binary sequence with values to be put in coresponding positions.", "x not in self.correctSigns]: return False state = True for single in expression:", "2 and expression[0] == '(' and expression[-1] == ')' and e.check_expression(expression): expression =", "is in range from 2 to len(variables). It checks whether it is not", "result += ascii_lowercase[i] + \"&\" result2 += '(' + result[:-1] + ')|' if", "e2 = int(stack.pop()) stack.append(operators[expression[0]](e2, e1)) del expression[0] return stack[0] def is_associative(tkn, associativity_type): if", "result2[:-1] def trim_expression(expression): \"\"\" Basic expression trimming :param expression: takes an expression which", "tkn in self.operators: while len(stack) > 0 and stack[-1] in self.operators: if (is_associative(tkn,", "or ERROR if it is not correct \"\"\" expression_object = Expression(expression) if not", "'(' and expression[-1] == ')' and e.check_expression(expression): expression = expression[1:-1] return expression def", "0: if expression[0] in ['0', '1']: stack.append(int(expression[0])) else: if expression[0] == '~': top", "False else: return False else: if single in self.operators and self.operators[single][1] == 2:", "whether it is not the same as var1 xor var2 xor var3 etc", "2)} # <operator> -> (priority,arguments_number) def check_if_brackets_are_correct(self, expression=''): \"\"\" Helper function to determine", "String expression to be reduced. We assume that it matches a pattern: (expr1)|(expr2)|(expr3)", "except: break set2 = reduce_(correct_binaries) self.general_form = expression_to_string(set2) return self.general_form if __name__ ==", "that doesnt need them example: (expr1)|(a)|(expr2) will be evaluated to: (expr1)|a|(expr2) :param expression:", "algorithm Result matches a pattern: (expression1)|(expression2)|(expression3)... 
:param expression: Infix expression as a String", "in expression: if variable in ascii_lowercase and variable not in variables: variables.append(variable) return", "bool(y)} stack = [] while len(expression) > 0: if expression[0] in ['0', '1']:", "s: result = \"\" for i in range(0, len(e1)): if e1[i] == '_':", "to RPN Warning: it doesnt check whether this expression is correct :param expression:", "expression=''): \"\"\" Higher level interface for checking expression It calls methods to determine", ":param expression: String expression to be reduced. We assume that it matches a", "in ascii_lowercase and variable not in variables: variables.append(variable) return variables def calculate_onp(expression, values):", "string :return: Bool value of an expression Warning: function will only work on", "0 and stack[-1] != '(': onp.append(stack.pop()) stack.pop() else: onp.append(tkn) while len(stack) > 0:", "errors \"\"\" zipped_list = list(zip(get_variables(expression), list(values))) expression = list(map(lambda x: replace_mapping(zipped_list, x), expression))", "top = not bool(stack.pop()) stack.append(top) else: e1 = int(stack.pop()) e2 = int(stack.pop()) stack.append(operators[expression[0]](e2,", "for some in expression_list: if len(some) <= 4: # we are sure that", "+ '^' + y, variables) + ')' def reduce_xor(expression): \"\"\" Specific function to", "of brackets checking \"\"\" if not expression: expression = self.expression brackets = 0", "+ 1): for expr in combinations(expressions_list, a): # i feel really bad for", "expression[0] == '(' and expression[-1] == ')' and e.check_expression(expression): expression = expression[1:-1] return", "stack = [] while len(expression) > 0: if expression[0] in ['0', '1']: stack.append(int(expression[0]))", "calculate_onp(current_expression, x): correct_binaries.append(x) except: break set2 = reduce_(correct_binaries) self.general_form = expression_to_string(set2) return self.general_form", "tautology) \"\"\" result2 = \"\" for e1 
in s: result = \"\" for", "correctly :param expression: expression in String form :return: Bool result of brackets checking", "in self.correctSigns]: return False state = True for single in expression: if state:", "+ '|', expressions_list)) return expression def reduce_brackets(expression): \"\"\" Function that reduces unessesary brackets.", "Bool result \"\"\" if not expression: expression = self.expression if not expression: return", "from expression \"\"\" variables = [] for variable in expression: if variable in", "'TF'): return find_value(zipped_list, x) else: return x def get_variables(expression): \"\"\" Functions filters the", "some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x, y: x + '^' + y, variables))", "in case of errors \"\"\" zipped_list = list(zip(get_variables(expression), list(values))) expression = list(map(lambda x:", "'(' + functools.reduce(lambda x, y: x + '^' + y, variables) + ')'", "'_' :return: Merged version of input, when certain bits are different this place", "whether expression is correct semantically, in terms of brackets and signs :param expression:", "not contain '|' since in this case they are a product of QuineMcCluskey", "with binary sequences of a set length :param n: length of a binary", "to len(variables). 
It checks whether it is not the same as var1 xor", "True def concat(s1, s2): \"\"\" Helper function to reduce expressions :param s1: Sthing", "return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression) def convert_to_onp(self, expression=''): \"\"\" Function converts an infix expression", "list(expression) variables = get_variables(str.join('|', expression_list)) binary_generator = generate_binary(len(variables)) incorrect_binaries = [] some_expression =", "# everything else than ~ state = True elif single in ['(', ')']:", "they do not contain '|' since in this case they are a product", "trim_expression(expression): \"\"\" Basic expression trimming :param expression: takes an expression which in most", "concat(e1, e2) if v: result.add(v) b1 = b2 = True if not b1:", "result of brackets checking \"\"\" if not expression: expression = self.expression brackets =", "[] onp = [] for tkn in expression: if tkn in self.operators: while", "def trim_expression(expression): \"\"\" Basic expression trimming :param expression: takes an expression which in", "will be 2 brackets + we want 1 variable (or variable + negation)", "signs and is semantically correct :param expression: String expression to be checked :return:", "+= '(' + result[:-1] + ')|' if result2 == '()|': return 'T' return", "= False else: return False else: if single in self.operators and self.operators[single][1] ==", "iterate over with binary sequence and '_' :return: Merged version of input, when", "import combinations def generate_binary(n): \"\"\" Function returns generator with binary sequences of a", "expression = self.expression if not expression: return True if [x for x in", "if not expression: expression = self.expression stack = [] onp = [] for", "= True if not b1: result.add(e1) if b2: return reduce_(result) return result def", "expression with trimmed brackets \"\"\" e = Expression('') while len(expression) > 2 and", "expression_object.generate_general_form() 
expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form)) if len(expression_with_xor) < len(expression): return expression_with_xor e = reduce_brackets(expression_in_general_form)", "check whether this expression is correct :param expression: Infix expression :return: RPN expression", "= get_variables(str.join('|', expression_list)) binary_generator = generate_binary(len(variables)) incorrect_binaries = [] some_expression = Expression('') onp_expression", "False if brackets == 0: return True return False def check_if_signs_are_correct(self, expression=''): \"\"\"", "check :return: Bool result \"\"\" if not expression: expression = self.expression return self.check_if_signs_are_correct(expression)", "in ['0', '1']: stack.append(int(expression[0])) else: if expression[0] == '~': top = not bool(stack.pop())", "return expression_with_xor e = reduce_brackets(expression_in_general_form) if len(e) < len(expression): return e return reduce_brackets(expression)", "not b1: result.add(e1) if b2: return reduce_(result) return result def expression_to_string(s): \"\"\" Helper", "\"\"\" if not expression: expression = self.expression return self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression) def convert_to_onp(self,", "\"\"\" def __init__(self, expression): self.general_form = '' self.correctSigns = '~^&|/>()TF' + ascii_lowercase self.expression", "form or input one if further reduction was not possible \"\"\" expressions_list =", "== '_': continue if e1[i] == '0': result += '~' result += ascii_lowercase[i]", "driving program. 
It calls functions to check if expression is correct and then", "False state = True for single in expression: if state: if single in", "then reduces expression :param expression: String expression to be reduced :return: reduced expression", "self.check_if_signs_are_correct(expression) and self.check_if_brackets_are_correct(expression) def convert_to_onp(self, expression=''): \"\"\" Function converts an infix expression to", "itself \"\"\" def __init__(self, expression): self.general_form = '' self.correctSigns = '~^&|/>()TF' + ascii_lowercase", "polish notation :param expression: Expression in RPN given as a string. :param values:", "in range(0, len(e1)): if e1[i] == '_': continue if e1[i] == '0': result", "are different this place is being replaced by '_' \"\"\" w = \"\"", "x, y: '|' + x + y + '|', expressions_list)) return expression def", "range from 2 to len(variables). It checks whether it is not the same", "reduced expression in string form or input one if further reduction was not", "__init__(self, expression): self.general_form = '' self.correctSigns = '~^&|/>()TF' + ascii_lowercase self.expression = expression.replace('", "= \"\" lz = 0 for z1, z2 in zip(s1, s2): if z1", "self.expression brackets = 0 for a in expression: if a == '(': brackets", "work on correct RNP expression and will not return any warnings in case", "generator with binary sequences of a set length :param n: length of a", "1): yield \"0\" + c yield \"1\" + c def find_value(zipped_list, x): for", "uses QuineMcCluskey algorithm Result matches a pattern: (expression1)|(expression2)|(expression3)... 
:param expression: Infix expression as", "same as var1 xor var2 xor var3 etc :param expression: String expression to", "state = True for single in expression: if state: if single in self.operators", "y + '|', expressions_list)) return expression def reduce_brackets(expression): \"\"\" Function that reduces unessesary", "checks if expression contains correct signs and is semantically correct :param expression: String", "| there is a expression that doesnt need them example: (expr1)|(a)|(expr2) will be", "ignore brackets since they are already checked continue elif single in (ascii_lowercase +", "to be checked :return: Bool result \"\"\" if not expression: expression = self.expression", "T (if expression is tautology) \"\"\" result2 = \"\" for e1 in s:", "= expression.replace(' ', '') self.operators = {'~': (4, 1), '^': (3, 2), '&':", "= [] for variable in expression: if variable in ascii_lowercase and variable not", "incorrect_binaries.append(x) except: break if len(incorrect_binaries) > 0: return str.join('|', expression_list) return '(' +", "> 0 and stack[-1] != '(': onp.append(stack.pop()) stack.pop() else: onp.append(tkn) while len(stack) >", "elif a == ')': brackets -= 1 if brackets < 0: return False", "check if expression is correct and then reduces expression :param expression: String expression", "check_if_brackets_are_correct(self, expression=''): \"\"\" Helper function to determine whether brackets are placed correctly :param", "+ y, onp) def generate_general_form(self, expression=''): \"\"\" Function generates general form from infix", "b in zipped_list: if a == x: return b return -1 def replace_mapping(zipped_list,", "'(': brackets += 1 elif a == ')': brackets -= 1 if brackets", "zipped_list = list(zip(get_variables(expression), list(values))) expression = list(map(lambda x: replace_mapping(zipped_list, x), expression)) operators =", "elif single in ['(', ')']: continue else: return False return not state def", "stack.append(int(expression[0])) else: if 
expression[0] == '~': top = not bool(stack.pop()) stack.append(top) else: e1", "an infix expression to RPN Warning: it doesnt check whether this expression is", "tuple of string expressions :param expression: tuple containing expressions. We assume that they", "len(reduced_sub_expression) < len(prev_expression): for var in list(expr): del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda x,", "generator = generate_binary(n) current_expression = self.convert_to_onp(expression) while True: try: x = generator.__next__() if", ":return: String made from input in pattern: (expression)|(expression)|(expression) or T (if expression is", "result2 += '(' + result[:-1] + ')|' if result2 == '()|': return 'T'", "return 1 elif x == 'F': return 0 elif x in (ascii_lowercase +", "= expression[1:-1] return expression def reduce_tuple(expression): \"\"\" Function reduces a tuple of string", "expression that doesnt need them example: (expr1)|(a)|(expr2) will be evaluated to: (expr1)|a|(expr2) :param", "= Expression(expression) if not expression_object.check_expression(): return 'ERROR' expression_in_general_form = expression_object.generate_general_form() expression_with_xor = reduce_brackets(reduce_xor(expression_in_general_form))", "a in range(2, n + 1): for expr in combinations(expressions_list, a): # i", "in String form :return: Bool result of brackets checking \"\"\" if not expression:", "it doesnt check whether this expression is correct :param expression: Infix expression :return:", "len(expression_with_xor) < len(expression): return expression_with_xor e = reduce_brackets(expression_in_general_form) if len(e) < len(expression): return", "== '(': stack.append(tkn) elif tkn == ')': while len(stack) > 0 and stack[-1]", "not expression: expression = self.expression brackets = 0 for a in expression: if", "we ignore brackets since they are already checked continue elif single in (ascii_lowercase", 
"'') self.operators = {'~': (4, 1), '^': (3, 2), '&': (2, 2), '|':", "not in self.correctSigns]: return False state = True for single in expression: if", "return False if brackets == 0: return True return False def check_if_signs_are_correct(self, expression=''):", "to change a reduced set to human-readable form :param s: Set with values", "a): # i feel really bad for this reduced_sub_expression = reduce_tuple(expr) prev_expression =", "and self.check_if_brackets_are_correct(expression) def convert_to_onp(self, expression=''): \"\"\" Function converts an infix expression to RPN", "'(': onp.append(stack.pop()) stack.pop() else: onp.append(tkn) while len(stack) > 0: onp.append(stack.pop()) return functools.reduce(lambda x,", "function that checks if expression contains correct signs and is semantically correct :param", "self.operators: while len(stack) > 0 and stack[-1] in self.operators: if (is_associative(tkn, 'l') and", "terms of brackets and signs :param expression: String expression to check :return: Bool", "= [] some_expression = Expression('') onp_expression = some_expression.convert_to_onp(str.join('|', expression_list)) onp_xor = some_expression.convert_to_onp(functools.reduce(lambda x,", "for a in range(2, n + 1): for expr in combinations(expressions_list, a): #", "(if expression is tautology) \"\"\" result2 = \"\" for e1 in s: result", "a, b in zipped_list: if a == x: return b return -1 def", "str.join('|', reduced_expressions) def reduce_logical_expression(expression): \"\"\" Main function that is responsible for driving program.", "some_expression.convert_to_onp(functools.reduce(lambda x, y: x + '^' + y, variables)) while True: try: x", "< len(prev_expression): for var in list(expr): del expressions_list[expressions_list.index(var)] expressions_list.append(reduced_sub_expression) return reduce_xor(functools.reduce(lambda x, y:", "for z1, z2 in zip(s1, s2): if z1 == z2: w += z1", "expression: if a == '(': brackets += 1 elif a == ')': brackets", "do not 
contain '|' since in this case they are a product of", "break stack.append(tkn) elif tkn == '(': stack.append(tkn) elif tkn == ')': while len(stack)", "'(' + result[:-1] + ')|' if result2 == '()|': return 'T' return result2[:-1]", "doesnt need them example: (expr1)|(a)|(expr2) will be evaluated to: (expr1)|a|(expr2) :param expression: string", "in expression: if a == '(': brackets += 1 elif a == ')':", "function :param s: Set with values :return: reduced set \"\"\" result = set()", "It uses QuineMcCluskey algorithm Result matches a pattern: (expression1)|(expression2)|(expression3)... :param expression: Infix expression", "= self.expression n = len(get_variables(expression)) correct_binaries = [] generator = generate_binary(n) current_expression =", "z1 else: lz += 1 w += \"_\" if lz == 1: return", "2), '/': (2, 2), '>': (1, 2)} # <operator> -> (priority,arguments_number) def check_if_brackets_are_correct(self,", ":param expression: Infix expression :return: RPN expression \"\"\" if not expression: expression =", "string import ascii_lowercase import functools from itertools import combinations def generate_binary(n): \"\"\" Function", "else: if single in self.operators and self.operators[single][1] == 2: # everything else than", "result = \"\" for i in range(0, len(e1)): if e1[i] == '_': continue", "if further reduction was not successful \"\"\" expression_list = list(expression) variables = get_variables(str.join('|',", "with trimmed brackets \"\"\" e = Expression('') while len(expression) > 2 and expression[0]", "if len(expression_list) == 1: return trim_expression(expression_list[0]) reduced_expressions = [] for some in expression_list:", "lambda x, y: bool(x) or bool(y), '/': lambda x, y: not (bool(x) and", "everything else than ~ state = True elif single in ['(', ')']: continue", "while len(expression) > 0: if expression[0] in ['0', '1']: stack.append(int(expression[0])) else: if expression[0]", "Higher level interface for checking expression It calls methods 
to determine whether expression", "input, when certain bits are different this place is being replaced by '_'", "expression def reduce_tuple(expression): \"\"\" Function reduces a tuple of string expressions :param expression:", "reduce_(result) return result def expression_to_string(s): \"\"\" Helper function to change a reduced set", "'()|': return 'T' return result2[:-1] def trim_expression(expression): \"\"\" Basic expression trimming :param expression:", ":return: String containing reduced expression or the input one if further reduction was", "except: break if len(incorrect_binaries) > 0: return str.join('|', expression_list) return '(' + functools.reduce(lambda", "a pattern: (expression) and trims brackets :return: expression with trimmed brackets \"\"\" e", "+= \"_\" if lz == 1: return w return False def reduce_(s): \"\"\"", "== 'F': return 0 elif x in (ascii_lowercase + 'TF'): return find_value(zipped_list, x)", ":param expression: takes an expression which in most cases matches a pattern: (expression)", "Expression: \"\"\" Class designed to handle most of expression operations. It contains map", "infix expression evaluated using QuineMcCluskey \"\"\" if not expression: expression = self.expression n", "expressions :param expression: tuple containing expressions. We assume that they do not contain", "len(expression_list) == 1: return trim_expression(expression_list[0]) reduced_expressions = [] for some in expression_list: if", "variables: variables.append(variable) return variables def calculate_onp(expression, values): \"\"\" Function calculates a value of", "len(expression) > 0: if expression[0] in ['0', '1']: stack.append(int(expression[0])) else: if expression[0] ==", "correct :param expression: Infix expression :return: RPN expression \"\"\" if not expression: expression", "of input, when certain bits are different this place is being replaced by" ]
[ "2021-09-08 14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('bjorn',", "('bjorn', '0009_auto_20210908_1427'), ] operations = [ migrations.AlterField( model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2,", "changed'), (5, 'Superseded'), (6, 'Cessation of operation'), (7, 'Certificate hold'), (8, 'Remove from", "(8, 'Remove from CRL'), (9, 'Privilege withdrawn'), (10, 'AA compromise')], null=True), ), ]", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('bjorn', '0009_auto_20210908_1427'), ] operations", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('bjorn', '0009_auto_20210908_1427'), ] operations =", "(4, 'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation of operation'), (7, 'Certificate hold'), (8,", "[ migrations.AlterField( model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key compromise'), (3, 'CA compromise'),", "migrations.AlterField( model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key compromise'), (3, 'CA compromise'), (4,", "on 2021-09-08 14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key compromise'), (3, 'CA compromise'), (4, 'Affiliation changed'), (5,", "] operations = [ migrations.AlterField( model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key compromise'),", "model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key compromise'), (3, 'CA 
compromise'), (4, 'Affiliation", "(5, 'Superseded'), (6, 'Cessation of operation'), (7, 'Certificate hold'), (8, 'Remove from CRL'),", "[ ('bjorn', '0009_auto_20210908_1427'), ] operations = [ migrations.AlterField( model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'),", "'Unspecified'), (2, 'Key compromise'), (3, 'CA compromise'), (4, 'Affiliation changed'), (5, 'Superseded'), (6,", "migrations, models class Migration(migrations.Migration): dependencies = [ ('bjorn', '0009_auto_20210908_1427'), ] operations = [", "name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key compromise'), (3, 'CA compromise'), (4, 'Affiliation changed'),", "operations = [ migrations.AlterField( model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key compromise'), (3,", "'Cessation of operation'), (7, 'Certificate hold'), (8, 'Remove from CRL'), (9, 'Privilege withdrawn'),", "Generated by Django 3.2.6 on 2021-09-08 14:44 from django.db import migrations, models class", "'CA compromise'), (4, 'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation of operation'), (7, 'Certificate", "# Generated by Django 3.2.6 on 2021-09-08 14:44 from django.db import migrations, models", "14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('bjorn', '0009_auto_20210908_1427'),", "operation'), (7, 'Certificate hold'), (8, 'Remove from CRL'), (9, 'Privilege withdrawn'), (10, 'AA", "(7, 'Certificate hold'), (8, 'Remove from CRL'), (9, 'Privilege withdrawn'), (10, 'AA compromise')],", "'0009_auto_20210908_1427'), ] operations = [ migrations.AlterField( model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key", "'Certificate hold'), (8, 'Remove from CRL'), 
(9, 'Privilege withdrawn'), (10, 'AA compromise')], null=True),", "choices=[(1, 'Unspecified'), (2, 'Key compromise'), (3, 'CA compromise'), (4, 'Affiliation changed'), (5, 'Superseded'),", "= [ ('bjorn', '0009_auto_20210908_1427'), ] operations = [ migrations.AlterField( model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1,", "Django 3.2.6 on 2021-09-08 14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "hold'), (8, 'Remove from CRL'), (9, 'Privilege withdrawn'), (10, 'AA compromise')], null=True), ),", "= [ migrations.AlterField( model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True, choices=[(1, 'Unspecified'), (2, 'Key compromise'), (3, 'CA", "(6, 'Cessation of operation'), (7, 'Certificate hold'), (8, 'Remove from CRL'), (9, 'Privilege", "'Superseded'), (6, 'Cessation of operation'), (7, 'Certificate hold'), (8, 'Remove from CRL'), (9,", "models class Migration(migrations.Migration): dependencies = [ ('bjorn', '0009_auto_20210908_1427'), ] operations = [ migrations.AlterField(", "class Migration(migrations.Migration): dependencies = [ ('bjorn', '0009_auto_20210908_1427'), ] operations = [ migrations.AlterField( model_name='certificate',", "compromise'), (4, 'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation of operation'), (7, 'Certificate hold'),", "'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation of operation'), (7, 'Certificate hold'), (8, 'Remove", "by Django 3.2.6 on 2021-09-08 14:44 from django.db import migrations, models class Migration(migrations.Migration):", "compromise'), (3, 'CA compromise'), (4, 'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation of operation'),", "(3, 'CA compromise'), (4, 'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation of operation'), (7,", "Migration(migrations.Migration): dependencies = [ ('bjorn', '0009_auto_20210908_1427'), ] 
operations = [ migrations.AlterField( model_name='certificate', name='revocation_reason',", "'Key compromise'), (3, 'CA compromise'), (4, 'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation of", "3.2.6 on 2021-09-08 14:44 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "(2, 'Key compromise'), (3, 'CA compromise'), (4, 'Affiliation changed'), (5, 'Superseded'), (6, 'Cessation", "of operation'), (7, 'Certificate hold'), (8, 'Remove from CRL'), (9, 'Privilege withdrawn'), (10,", "dependencies = [ ('bjorn', '0009_auto_20210908_1427'), ] operations = [ migrations.AlterField( model_name='certificate', name='revocation_reason', field=models.PositiveSmallIntegerField(blank=True,", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('bjorn', '0009_auto_20210908_1427'), ]" ]
[ "<filename>codershq/users/migrations/0004_auto_20210805_1832.py<gh_stars>10-100 # Generated by Django 3.0.11 on 2021-08-05 14:32 from django.db import migrations,", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20210805_1818'), ]", "field=models.TextField(blank=True, max_length=500, verbose_name='Bio'), ), migrations.AlterField( model_name='user', name='name', field=models.CharField(blank=True, max_length=255, verbose_name='Enter your name'), ),", "Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20210805_1818'), ] operations = [ migrations.RemoveField( model_name='user', name='cv',", "on 2021-08-05 14:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20210805_1818'), ] operations = [", "dependencies = [ ('users', '0003_auto_20210805_1818'), ] operations = [ migrations.RemoveField( model_name='user', name='cv', ),", "14:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20210805_1818'),", "# Generated by Django 3.0.11 on 2021-08-05 14:32 from django.db import migrations, models", "class Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20210805_1818'), ] operations = [ migrations.RemoveField( model_name='user',", "] operations = [ migrations.RemoveField( model_name='user', name='cv', ), migrations.AlterField( model_name='user', name='bio', field=models.TextField(blank=True, max_length=500,", "('users', '0003_auto_20210805_1818'), ] operations = [ migrations.RemoveField( model_name='user', name='cv', ), migrations.AlterField( model_name='user', name='bio',", "'0003_auto_20210805_1818'), ] operations = [ migrations.RemoveField( model_name='user', name='cv', ), migrations.AlterField( model_name='user', name='bio', 
field=models.TextField(blank=True,", "Django 3.0.11 on 2021-08-05 14:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "models class Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20210805_1818'), ] operations = [ migrations.RemoveField(", "by Django 3.0.11 on 2021-08-05 14:32 from django.db import migrations, models class Migration(migrations.Migration):", "max_length=500, verbose_name='Bio'), ), migrations.AlterField( model_name='user', name='name', field=models.CharField(blank=True, max_length=255, verbose_name='Enter your name'), ), ]", "= [ ('users', '0003_auto_20210805_1818'), ] operations = [ migrations.RemoveField( model_name='user', name='cv', ), migrations.AlterField(", "= [ migrations.RemoveField( model_name='user', name='cv', ), migrations.AlterField( model_name='user', name='bio', field=models.TextField(blank=True, max_length=500, verbose_name='Bio'), ),", "model_name='user', name='bio', field=models.TextField(blank=True, max_length=500, verbose_name='Bio'), ), migrations.AlterField( model_name='user', name='name', field=models.CharField(blank=True, max_length=255, verbose_name='Enter your", "migrations.RemoveField( model_name='user', name='cv', ), migrations.AlterField( model_name='user', name='bio', field=models.TextField(blank=True, max_length=500, verbose_name='Bio'), ), migrations.AlterField( model_name='user',", "Generated by Django 3.0.11 on 2021-08-05 14:32 from django.db import migrations, models class", "3.0.11 on 2021-08-05 14:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "[ ('users', '0003_auto_20210805_1818'), ] operations = [ migrations.RemoveField( model_name='user', name='cv', ), migrations.AlterField( model_name='user',", "operations = [ migrations.RemoveField( model_name='user', name='cv', ), migrations.AlterField( model_name='user', name='bio', field=models.TextField(blank=True, max_length=500, 
verbose_name='Bio'),", "migrations.AlterField( model_name='user', name='bio', field=models.TextField(blank=True, max_length=500, verbose_name='Bio'), ), migrations.AlterField( model_name='user', name='name', field=models.CharField(blank=True, max_length=255, verbose_name='Enter", "), migrations.AlterField( model_name='user', name='bio', field=models.TextField(blank=True, max_length=500, verbose_name='Bio'), ), migrations.AlterField( model_name='user', name='name', field=models.CharField(blank=True, max_length=255,", "model_name='user', name='cv', ), migrations.AlterField( model_name='user', name='bio', field=models.TextField(blank=True, max_length=500, verbose_name='Bio'), ), migrations.AlterField( model_name='user', name='name',", "name='cv', ), migrations.AlterField( model_name='user', name='bio', field=models.TextField(blank=True, max_length=500, verbose_name='Bio'), ), migrations.AlterField( model_name='user', name='name', field=models.CharField(blank=True,", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20210805_1818'), ] operations =", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20210805_1818'), ] operations", "[ migrations.RemoveField( model_name='user', name='cv', ), migrations.AlterField( model_name='user', name='bio', field=models.TextField(blank=True, max_length=500, verbose_name='Bio'), ), migrations.AlterField(", "name='bio', field=models.TextField(blank=True, max_length=500, verbose_name='Bio'), ), migrations.AlterField( model_name='user', name='name', field=models.CharField(blank=True, max_length=255, verbose_name='Enter your name'),", "2021-08-05 14:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users'," ]
[]
[ "self.log: print >> sys.stderr, '[ERROR]', result result = None else: self.cache_miss = True", "None if self.log: if self.cache_miss: print >> sys.stderr, 'Retrieved \"%s\"' % url else:", "API # The important fields of each result are # - url (+", "print >> sys.stderr, 'Downloading from', url, '...' # Check blacklist parsed_url = urlparse.urlparse(url)", "'...' # Check blacklist parsed_url = urlparse.urlparse(url) if parsed_url.netloc in BLACKLIST: raise WebLoadingError('URL", "(%s)' % (url, self.get_path(url))) return result ################################################################ # GOOGLE SUGGEST GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q='", "content_type) result = response.read() self.write(url, result) except Exception, e: if self.log: print >>", "-*- import urllib, urllib2, urlparse, socket import json, sys, os, hashlib, subprocess, time", "BLACKLIST: raise WebLoadingError('URL %s in blacklist' % url) # Open web page opener", "visibleUrl, cacheUrl) # - titleNoFormatting (+ title) # - content GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q='", "Custom Search CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self, api_key, cx): self.api_key = api_key", "raise WebLoadingError(\"Non-HTML response: %s\" % content_type) result = response.read() self.write(url, result) except Exception,", "Use Custom search return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception): def __init__(self, msg): self.args =", "results]) except: # Stale bad cache ... 
pass # Use Custom search return", "self.get_google_search_url(keyword) result = self.read(old_url) if result and not isinstance(result, WebLoadingError): # Found result", "self.get_path(url, already_hashed) if os.path.exists(path): with open(path) as fin: error = False check_url =", "# Stale bad cache ... pass # Use Custom search return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword))", "response = opener.open(url, timeout=self.timeout) # Check content type to prevent non-HTML content_type =", "= 'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self, before, after=''): answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after)", "-*- coding: utf-8 -*- import urllib, urllib2, urlparse, socket import json, sys, os,", "os.path.exists(self.cachePath): os.mkdir(self.cachePath) self.log = log self.cache_miss = False self.timeout = timeout def get_hashcode(self,", "self.get_page(url, check_html=False) while True: try: return json.loads(result)['responseData']['results'] except: # Google nailed me! 
Exponential", "WebLoadingError(\"Non-HTML response: %s\" % content_type) result = response.read() self.write(url, result) except Exception, e:", "self.get_current_datetime(), '-->\\n')) def read(self, url, already_hashed=False): path = self.get_path(url, already_hashed) if os.path.exists(path): with", "urllib.quote(url), path if error: return WebLoadingError(error_message) else: return fin.read() def write(self, url, content,", "Check content type to prevent non-HTML content_type = response.info().type if check_html and content_type", "url (+ unescapedUrl, visibleUrl, cacheUrl) # - titleNoFormatting (+ title) # - content", "# GOOGLE SUGGEST GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self, before, after=''): answer = self.GOOGLE_SUGGEST_URL", "+= '&cp=' + str(len(before)) return answer def get_from_google_suggest(self, before, after=''): url = self.get_google_suggest_url(before,", "def set_google_custom_search_keys(self, api_key, cx): self.api_key = api_key self.cx = cx def get_google_custom_search_url(self, keyword):", "after) return json.loads(self.get_page(url, check_html=False))[1] ################################################################ # GOOGLE SEARCH -- old API # The", "...' % WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *= 2 result = self.get_page(url, check_html=False, force=True) def", "read(self, url, already_hashed=False): path = self.get_path(url, already_hashed) if os.path.exists(path): with open(path) as fin:", "'w') as fout: fout.write(self.comment(url)) fout.write(content) def write_error(self, url, error, already_hashed=False): path = self.get_path(url,", "' ') + '\\n') fout.write(self.comment(url)) def get_page(self, url, force=False, check_html=True): result = self.read(url)", "for x in results]) except: # Stale bad cache ... 
pass # Use", "\"%s\"' % url else: print >> sys.stderr, ('Loaded \"%s\" from cache (%s)' %", "write(self, url, content, already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w') as fout:", "answer = self.get_page(url, check_html=False) if raw: return answer return json.loads(answer) def get_urls_from_google_custom_search(self, keyword):", "self.write_error(url, str(e.message)) result = None if self.log: if self.cache_miss: print >> sys.stderr, 'Retrieved", "response.info().type if check_html and content_type != 'text/html': raise WebLoadingError(\"Non-HTML response: %s\" % content_type)", "'&cp=' + str(len(before)) return answer def get_from_google_suggest(self, before, after=''): url = self.get_google_suggest_url(before, after)", "################################################################ # GOOGLE SEARCH -- old API # The important fields of each", "old API # The important fields of each result are # - url", "[(x['unescapedUrl'], x['titleNoFormatting']) for x in results]) except: # Stale bad cache ... pass", "x.get('title', '')) for x in results] def get_urls_from_google_hybrid_search(self, keyword): '''Return (cache_path, results)''' old_url", "urllib2, urlparse, socket import json, sys, os, hashlib, subprocess, time from blacklist import", "results = self.get_from_google_search_with_backoff(keyword) return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] ################################################################ # GOOGLE", "keyword): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) while True: try: return json.loads(result)['responseData']['results']", "url else: print >> sys.stderr, ('Loaded \"%s\" from cache (%s)' % (url, self.get_path(url)))", "('Hide from Google for %d seconds ...' 
% WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *= 2", "sys.stderr, ('Loaded \"%s\" from cache (%s)' % (url, self.get_path(url))) return result ################################################################ #", "2 result = self.get_page(url, check_html=False, force=True) def get_urls_from_google_search_with_backoff(self, keyword): results = self.get_from_google_search_with_backoff(keyword) return", "sys, os, hashlib, subprocess, time from blacklist import BLACKLIST BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..')))", "except: # Stale bad cache ... pass # Use Custom search return (self.get_path(self.get_google_custom_search_url(keyword)),", "content type to prevent non-HTML content_type = response.info().type if check_html and content_type !=", "hashcode) subprocess.call([browser, path]) def comment(self, url): return ' '.join(('<!--', urllib.quote(url), self.get_current_datetime(), '-->\\n')) def", "-- old API # The important fields of each result are # -", "result ################################################################ # GOOGLE SUGGEST GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self, before, after=''): answer", "open(path, 'w') as fout: fout.write(self.comment(url)) fout.write(content) def write_error(self, url, error, already_hashed=False): path =", "(+ title) # - content GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def get_google_search_url(self, keyword): answer =", "def write_error(self, url, error, already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w') as", "keyword, raw=False): url = self.get_google_custom_search_url(keyword) answer = self.get_page(url, check_html=False) if raw: return answer", "% \\ (self.api_key, self.cx, urllib.quote(keyword)) return answer def get_from_google_custom_search(self, keyword, raw=False): url =", "= 
False if isinstance(result, WebLoadingError): if self.log: print >> sys.stderr, '[ERROR]', result result", "answer def get_from_google_suggest(self, before, after=''): url = self.get_google_suggest_url(before, after) return json.loads(self.get_page(url, check_html=False))[1] ################################################################", "= fin.readline() if not already_hashed: tokens = check_url.split() assert len(tokens) > 2 and", "if not already_hashed: tokens = check_url.split() assert len(tokens) > 2 and tokens[1] ==", "Exception, e: if self.log: print >> sys.stderr, '[ERROR] ', e if isinstance(e, (WebLoadingError,", "subprocess.call([browser, path]) def comment(self, url): return ' '.join(('<!--', urllib.quote(url), self.get_current_datetime(), '-->\\n')) def read(self,", "if check_url == 'ERROR': error = True error_message = fin.readline().strip() check_url = fin.readline()", "fin: error = False check_url = fin.readline().strip() if check_url == 'ERROR': error =", "2 and tokens[1] == urllib.quote(url), path if error: return WebLoadingError(error_message) else: return fin.read()", "fin.read() def write(self, url, content, already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w')", "in results] ################################################################ # GOOGLE SEARCH -- Custom Search CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s'", "keyword): answer = self.CUSTOM_GOOGLE_SEARCH_URL % \\ (self.api_key, self.cx, urllib.quote(keyword)) return answer def get_from_google_custom_search(self,", "opener.open(url, timeout=self.timeout) # Check content type to prevent non-HTML content_type = response.info().type if", "= self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after) if after: answer += '&cp=' + str(len(before))", "titleNoFormatting (+ title) # - content GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' 
def get_google_search_url(self, keyword): answer", "answer return json.loads(answer) def get_urls_from_google_custom_search(self, keyword): results = self.get_from_google_custom_search(keyword)['items'] return [(x['link'], x.get('title', ''))", "url, already_hashed=False): path = self.get_path(url, already_hashed) if os.path.exists(path): with open(path) as fin: error", "answer = self.CUSTOM_GOOGLE_SEARCH_URL % \\ (self.api_key, self.cx, urllib.quote(keyword)) return answer def get_from_google_custom_search(self, keyword,", "################################################################ # GOOGLE SUGGEST GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self, before, after=''): answer =", "Open web page opener = urllib2.build_opener() opener.addheaders = [ ('User-agent', 'Mozilla/5.0 (compatible; MSIE", "force=False, check_html=True): result = self.read(url) if result and not force: self.cache_miss = False", "os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class WebpageCache(object): def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15): self.cachePath = os.path.join(basedir,", "# Check blacklist parsed_url = urlparse.urlparse(url) if parsed_url.netloc in BLACKLIST: raise WebLoadingError('URL %s", "WebLoadingError(Exception): def __init__(self, msg): self.args = (msg,) self.msg = msg self.message = msg", "urlparse, socket import json, sys, os, hashlib, subprocess, time from blacklist import BLACKLIST", "nailed me! Exponential backoff! print >> sys.stderr, ('Hide from Google for %d seconds", "dirname) if not os.path.exists(self.cachePath): os.mkdir(self.cachePath) self.log = log self.cache_miss = False self.timeout =", "from Google for %d seconds ...' 
% WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *= 2 result", "6.0)')] response = opener.open(url, timeout=self.timeout) # Check content type to prevent non-HTML content_type", "urllib.quote(url), self.get_current_datetime(), '-->\\n')) def read(self, url, already_hashed=False): path = self.get_path(url, already_hashed) if os.path.exists(path):", "self.GOOGLE_SEARCH_URL + urllib.quote(keyword) return answer def get_from_google_search(self, keyword, raw=False): url = self.get_google_search_url(keyword) result", "else: self.cache_miss = True try: if self.log: print >> sys.stderr, 'Downloading from', url,", "sys.stderr, '[ERROR]', result result = None else: self.cache_miss = True try: if self.log:", "urllib2.build_opener() opener.addheaders = [ ('User-agent', 'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 6.0)')] response", "'[ERROR] ', e if isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)): self.write_error(url, str(e.message)) result = None", "url, error, already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write('ERROR\\n')", "content_type = response.info().type if check_html and content_type != 'text/html': raise WebLoadingError(\"Non-HTML response: %s\"", "type to prevent non-HTML content_type = response.info().type if check_html and content_type != 'text/html':", "try: results = json.loads(result)['responseData']['results'] return (self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]) except:", "cache (%s)' % (url, self.get_path(url))) return result ################################################################ # GOOGLE SUGGEST GOOGLE_SUGGEST_URL =", "if os.path.exists(path): with open(path) as fin: error = False check_url = fin.readline().strip() if", "result return json.loads(result) def get_urls_from_google_search(self, keyword): results = self.get_from_google_search(keyword)['responseData']['results'] return 
[(x['unescapedUrl'], x['titleNoFormatting']) for", "self.cachePath = os.path.join(basedir, dirname) if not os.path.exists(self.cachePath): os.mkdir(self.cachePath) self.log = log self.cache_miss =", "fout: fout.write(self.comment(url)) fout.write(content) def write_error(self, url, error, already_hashed=False): path = self.get_path(url, already_hashed) with", "result result = None else: self.cache_miss = True try: if self.log: print >>", "raw=False): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) if raw: return result return", "True: try: return json.loads(result)['responseData']['results'] except: # Google nailed me! Exponential backoff! print >>", "= self.read(old_url) if result and not isinstance(result, WebLoadingError): # Found result in cache", "= log self.cache_miss = False self.timeout = timeout def get_hashcode(self, url): return hashlib.sha1(url).hexdigest()", "get_urls_from_google_search_with_backoff(self, keyword): results = self.get_from_google_search_with_backoff(keyword) return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] ################################################################", "def write(self, url, content, already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w') as", "fout: fout.write('ERROR\\n') fout.write(error.replace('\\n', ' ') + '\\n') fout.write(self.comment(url)) def get_page(self, url, force=False, check_html=True):", "hashlib, subprocess, time from blacklist import BLACKLIST BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class WebpageCache(object):", "prevent non-HTML content_type = response.info().type if check_html and content_type != 'text/html': raise WebLoadingError(\"Non-HTML", "= urllib2.build_opener() opener.addheaders = [ ('User-agent', 'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 6.0)')]", "as fin: error = False check_url = fin.readline().strip() if check_url == 'ERROR': error", "def 
get_urls_from_google_search_with_backoff(self, keyword): results = self.get_from_google_search_with_backoff(keyword) return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]", "x in results] GOOGLE_PAUSE = 30 def get_from_google_search_with_backoff(self, keyword): url = self.get_google_search_url(keyword) result", "get_urls_from_google_hybrid_search(self, keyword): '''Return (cache_path, results)''' old_url = self.get_google_search_url(keyword) result = self.read(old_url) if result", "json, sys, os, hashlib, subprocess, time from blacklist import BLACKLIST BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__,", "= os.path.join(self.cachePath, hashcode) subprocess.call([browser, path]) def comment(self, url): return ' '.join(('<!--', urllib.quote(url), self.get_current_datetime(),", "Google for %d seconds ...' % WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *= 2 result =", "False check_url = fin.readline().strip() if check_url == 'ERROR': error = True error_message =", "os.mkdir(self.cachePath) self.log = log self.cache_miss = False self.timeout = timeout def get_hashcode(self, url):", "print >> sys.stderr, ('Hide from Google for %d seconds ...' 
% WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE)", "as fout: fout.write('ERROR\\n') fout.write(error.replace('\\n', ' ') + '\\n') fout.write(self.comment(url)) def get_page(self, url, force=False,", "- content GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def get_google_search_url(self, keyword): answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword)", "'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self, api_key, cx): self.api_key = api_key self.cx = cx def get_google_custom_search_url(self,", "if error: return WebLoadingError(error_message) else: return fin.read() def write(self, url, content, already_hashed=False): path", "= 30 def get_from_google_search_with_backoff(self, keyword): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) while", "return result return json.loads(result) def get_urls_from_google_search(self, keyword): results = self.get_from_google_search(keyword)['responseData']['results'] return [(x['unescapedUrl'], x['titleNoFormatting'])", "self.cache_miss: print >> sys.stderr, 'Retrieved \"%s\"' % url else: print >> sys.stderr, ('Loaded", "(cache_path, results)''' old_url = self.get_google_search_url(keyword) result = self.read(old_url) if result and not isinstance(result,", "self.read(url) if result and not force: self.cache_miss = False if isinstance(result, WebLoadingError): if", "not already_hashed: url = self.get_hashcode(url) return os.path.join(self.cachePath, url) def get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime())", "return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def open_in_browser(self, hashcode, browser=\"firefox\"): path = os.path.join(self.cachePath, hashcode) subprocess.call([browser, path])", "self.timeout = timeout def get_hashcode(self, url): return hashlib.sha1(url).hexdigest() def get_path(self, url, already_hashed=False): if", "not already_hashed: 
tokens = check_url.split() assert len(tokens) > 2 and tokens[1] == urllib.quote(url),", "keyword): answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword) return answer def get_from_google_search(self, keyword, raw=False): url", "self.read(old_url) if result and not isinstance(result, WebLoadingError): # Found result in cache try:", "self.get_from_google_search_with_backoff(keyword) return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] ################################################################ # GOOGLE SEARCH --", "return os.path.join(self.cachePath, url) def get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def open_in_browser(self, hashcode, browser=\"firefox\"): path", "result = response.read() self.write(url, result) except Exception, e: if self.log: print >> sys.stderr,", "- url (+ unescapedUrl, visibleUrl, cacheUrl) # - titleNoFormatting (+ title) # -", "return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] ################################################################ # GOOGLE SEARCH -- Custom", "url, '...' 
# Check blacklist parsed_url = urlparse.urlparse(url) if parsed_url.netloc in BLACKLIST: raise", "return json.loads(self.get_page(url, check_html=False))[1] ################################################################ # GOOGLE SEARCH -- old API # The important", "(self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]) except: # Stale bad cache ...", "self.get_from_google_custom_search(keyword)['items'] return [(x['link'], x.get('title', '')) for x in results] def get_urls_from_google_hybrid_search(self, keyword): '''Return", "raw: return result return json.loads(result) def get_urls_from_google_search(self, keyword): results = self.get_from_google_search(keyword)['responseData']['results'] return [(x['unescapedUrl'],", "= self.get_hashcode(url) return os.path.join(self.cachePath, url) def get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def open_in_browser(self, hashcode,", "self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) if raw: return result return json.loads(result) def get_urls_from_google_search(self,", "!= 'text/html': raise WebLoadingError(\"Non-HTML response: %s\" % content_type) result = response.read() self.write(url, result)", "fin.readline().strip() check_url = fin.readline() if not already_hashed: tokens = check_url.split() assert len(tokens) >", "print >> sys.stderr, '[ERROR]', result result = None else: self.cache_miss = True try:", "in results] def get_urls_from_google_hybrid_search(self, keyword): '''Return (cache_path, results)''' old_url = self.get_google_search_url(keyword) result =", "json.loads(result) def get_urls_from_google_search(self, keyword): results = self.get_from_google_search(keyword)['responseData']['results'] return [(x['unescapedUrl'], x['titleNoFormatting']) for x in", "x in results] def get_urls_from_google_hybrid_search(self, keyword): '''Return (cache_path, results)''' old_url = self.get_google_search_url(keyword) 
result", "from cache (%s)' % (url, self.get_path(url))) return result ################################################################ # GOOGLE SUGGEST GOOGLE_SUGGEST_URL", "path = self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write(self.comment(url)) fout.write(content) def write_error(self,", "path = self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write('ERROR\\n') fout.write(error.replace('\\n', ' ')", "if after: answer += '&cp=' + str(len(before)) return answer def get_from_google_suggest(self, before, after=''):", "each result are # - url (+ unescapedUrl, visibleUrl, cacheUrl) # - titleNoFormatting", "json.loads(result)['responseData']['results'] except: # Google nailed me! Exponential backoff! print >> sys.stderr, ('Hide from", "return fin.read() def write(self, url, content, already_hashed=False): path = self.get_path(url, already_hashed) with open(path,", "[(x['link'], x.get('title', '')) for x in results] def get_urls_from_google_hybrid_search(self, keyword): '''Return (cache_path, results)'''", "% (url, self.get_path(url))) return result ################################################################ # GOOGLE SUGGEST GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q=' def", "# Found result in cache try: results = json.loads(result)['responseData']['results'] return (self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting'])", "subprocess, time from blacklist import BLACKLIST BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class WebpageCache(object): def", "'')) for x in results] def get_urls_from_google_hybrid_search(self, keyword): '''Return (cache_path, results)''' old_url =", "%s\" % content_type) result = response.read() self.write(url, result) except Exception, e: if self.log:", "path]) def comment(self, url): return ' '.join(('<!--', urllib.quote(url), self.get_current_datetime(), '-->\\n')) def read(self, url,", 
"get_hashcode(self, url): return hashlib.sha1(url).hexdigest() def get_path(self, url, already_hashed=False): if not already_hashed: url =", "get_from_google_search(self, keyword, raw=False): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) if raw: return", "results] GOOGLE_PAUSE = 30 def get_from_google_search_with_backoff(self, keyword): url = self.get_google_search_url(keyword) result = self.get_page(url,", "get_google_custom_search_url(self, keyword): answer = self.CUSTOM_GOOGLE_SEARCH_URL % \\ (self.api_key, self.cx, urllib.quote(keyword)) return answer def", "to prevent non-HTML content_type = response.info().type if check_html and content_type != 'text/html': raise", "(compatible; MSIE 7.0; Windows NT 6.0)')] response = opener.open(url, timeout=self.timeout) # Check content", "seconds ...' % WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *= 2 result = self.get_page(url, check_html=False, force=True)", "api_key self.cx = cx def get_google_custom_search_url(self, keyword): answer = self.CUSTOM_GOOGLE_SEARCH_URL % \\ (self.api_key,", "self.cx, urllib.quote(keyword)) return answer def get_from_google_custom_search(self, keyword, raw=False): url = self.get_google_custom_search_url(keyword) answer =", "if not os.path.exists(self.cachePath): os.mkdir(self.cachePath) self.log = log self.cache_miss = False self.timeout = timeout", "self.write(url, result) except Exception, e: if self.log: print >> sys.stderr, '[ERROR] ', e", "(self.api_key, self.cx, urllib.quote(keyword)) return answer def get_from_google_custom_search(self, keyword, raw=False): url = self.get_google_custom_search_url(keyword) answer", "False if isinstance(result, WebLoadingError): if self.log: print >> sys.stderr, '[ERROR]', result result =", "'Retrieved \"%s\"' % url else: print >> sys.stderr, ('Loaded \"%s\" from cache (%s)'", "# -*- coding: utf-8 -*- import urllib, urllib2, urlparse, socket import json, sys,", 
"url, already_hashed=False): if not already_hashed: url = self.get_hashcode(url) return os.path.join(self.cachePath, url) def get_current_datetime(self):", "== urllib.quote(url), path if error: return WebLoadingError(error_message) else: return fin.read() def write(self, url,", "raise WebLoadingError('URL %s in blacklist' % url) # Open web page opener =", "for x in results] def get_urls_from_google_hybrid_search(self, keyword): '''Return (cache_path, results)''' old_url = self.get_google_search_url(keyword)", "x['titleNoFormatting']) for x in results] GOOGLE_PAUSE = 30 def get_from_google_search_with_backoff(self, keyword): url =", "sys.stderr, 'Downloading from', url, '...' # Check blacklist parsed_url = urlparse.urlparse(url) if parsed_url.netloc", "as fout: fout.write(self.comment(url)) fout.write(content) def write_error(self, url, error, already_hashed=False): path = self.get_path(url, already_hashed)", "import json, sys, os, hashlib, subprocess, time from blacklist import BLACKLIST BASEDIR =", "raw=False): url = self.get_google_custom_search_url(keyword) answer = self.get_page(url, check_html=False) if raw: return answer return", "GOOGLE_PAUSE = 30 def get_from_google_search_with_backoff(self, keyword): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False)", "return json.loads(result) def get_urls_from_google_search(self, keyword): results = self.get_from_google_search(keyword)['responseData']['results'] return [(x['unescapedUrl'], x['titleNoFormatting']) for x", "= self.get_google_search_url(keyword) result = self.read(old_url) if result and not isinstance(result, WebLoadingError): # Found", "result = self.get_page(url, check_html=False, force=True) def get_urls_from_google_search_with_backoff(self, keyword): results = self.get_from_google_search_with_backoff(keyword) return [(x['unescapedUrl'],", "str(len(before)) return answer def get_from_google_suggest(self, before, after=''): url = self.get_google_suggest_url(before, 
after) return json.loads(self.get_page(url,", "result = self.read(old_url) if result and not isinstance(result, WebLoadingError): # Found result in", "answer += '&cp=' + str(len(before)) return answer def get_from_google_suggest(self, before, after=''): url =", "timeout=self.timeout) # Check content type to prevent non-HTML content_type = response.info().type if check_html", "if isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)): self.write_error(url, str(e.message)) result = None if self.log: if", "# GOOGLE SEARCH -- old API # The important fields of each result", "Check blacklist parsed_url = urlparse.urlparse(url) if parsed_url.netloc in BLACKLIST: raise WebLoadingError('URL %s in", "self.get_page(url, check_html=False) if raw: return answer return json.loads(answer) def get_urls_from_google_custom_search(self, keyword): results =", "get_from_google_search_with_backoff(self, keyword): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) while True: try: return", "def get_google_custom_search_url(self, keyword): answer = self.CUSTOM_GOOGLE_SEARCH_URL % \\ (self.api_key, self.cx, urllib.quote(keyword)) return answer", "= self.CUSTOM_GOOGLE_SEARCH_URL % \\ (self.api_key, self.cx, urllib.quote(keyword)) return answer def get_from_google_custom_search(self, keyword, raw=False):", "self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write(self.comment(url)) fout.write(content) def write_error(self, url, error,", "'text/html': raise WebLoadingError(\"Non-HTML response: %s\" % content_type) result = response.read() self.write(url, result) except", "30 def get_from_google_search_with_backoff(self, keyword): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) while True:", "if self.log: print >> sys.stderr, '[ERROR]', result result = None else: self.cache_miss =", "self.log: print >> sys.stderr, '[ERROR] ', e if isinstance(e, (WebLoadingError, urllib2.URLError, 
socket.error)): self.write_error(url,", "+ str(len(before)) return answer def get_from_google_suggest(self, before, after=''): url = self.get_google_suggest_url(before, after) return", "def get_urls_from_google_custom_search(self, keyword): results = self.get_from_google_custom_search(keyword)['items'] return [(x['link'], x.get('title', '')) for x in", "except: # Google nailed me! Exponential backoff! print >> sys.stderr, ('Hide from Google", "= self.get_page(url, check_html=False) if raw: return result return json.loads(result) def get_urls_from_google_search(self, keyword): results", "*= 2 result = self.get_page(url, check_html=False, force=True) def get_urls_from_google_search_with_backoff(self, keyword): results = self.get_from_google_search_with_backoff(keyword)", "not os.path.exists(self.cachePath): os.mkdir(self.cachePath) self.log = log self.cache_miss = False self.timeout = timeout def", "(self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception): def __init__(self, msg): self.args = (msg,) self.msg = msg", "self.cache_miss = False if isinstance(result, WebLoadingError): if self.log: print >> sys.stderr, '[ERROR]', result", "url) # Open web page opener = urllib2.build_opener() opener.addheaders = [ ('User-agent', 'Mozilla/5.0", "return answer return json.loads(answer) def get_urls_from_google_custom_search(self, keyword): results = self.get_from_google_custom_search(keyword)['items'] return [(x['link'], x.get('title',", "... 
pass # Use Custom search return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception): def __init__(self,", "url): return hashlib.sha1(url).hexdigest() def get_path(self, url, already_hashed=False): if not already_hashed: url = self.get_hashcode(url)", "unescapedUrl, visibleUrl, cacheUrl) # - titleNoFormatting (+ title) # - content GOOGLE_SEARCH_URL =", "def get_hashcode(self, url): return hashlib.sha1(url).hexdigest() def get_path(self, url, already_hashed=False): if not already_hashed: url", "= check_url.split() assert len(tokens) > 2 and tokens[1] == urllib.quote(url), path if error:", "force: self.cache_miss = False if isinstance(result, WebLoadingError): if self.log: print >> sys.stderr, '[ERROR]',", "opener.addheaders = [ ('User-agent', 'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 6.0)')] response =", "url) def get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def open_in_browser(self, hashcode, browser=\"firefox\"): path = os.path.join(self.cachePath,", "GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def get_google_search_url(self, keyword): answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword) return answer", "self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception): def __init__(self, msg): self.args = (msg,) self.msg = msg self.message", "self.cache_miss = False self.timeout = timeout def get_hashcode(self, url): return hashlib.sha1(url).hexdigest() def get_path(self,", "browser=\"firefox\"): path = os.path.join(self.cachePath, hashcode) subprocess.call([browser, path]) def comment(self, url): return ' '.join(('<!--',", "True error_message = fin.readline().strip() check_url = fin.readline() if not already_hashed: tokens = check_url.split()", "already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w') as fout: 
fout.write('ERROR\\n') fout.write(error.replace('\\n', '", "hashcode, browser=\"firefox\"): path = os.path.join(self.cachePath, hashcode) subprocess.call([browser, path]) def comment(self, url): return '", "socket.error)): self.write_error(url, str(e.message)) result = None if self.log: if self.cache_miss: print >> sys.stderr,", "SEARCH -- Custom Search CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self, api_key, cx): self.api_key", "if self.log: print >> sys.stderr, 'Downloading from', url, '...' # Check blacklist parsed_url", "def get_from_google_search(self, keyword, raw=False): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) if raw:", "self.get_google_custom_search_url(keyword) answer = self.get_page(url, check_html=False) if raw: return answer return json.loads(answer) def get_urls_from_google_custom_search(self,", "return answer def get_from_google_search(self, keyword, raw=False): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False)", "= 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self, api_key, cx): self.api_key = api_key self.cx = cx", "fout.write(content) def write_error(self, url, error, already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w')", "result and not force: self.cache_miss = False if isinstance(result, WebLoadingError): if self.log: print", "def get_from_google_search_with_backoff(self, keyword): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) while True: try:", "parsed_url = urlparse.urlparse(url) if parsed_url.netloc in BLACKLIST: raise WebLoadingError('URL %s in blacklist' %", "self.get_path(url))) return result ################################################################ # GOOGLE SUGGEST GOOGLE_SUGGEST_URL = 
'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self, before,", "x['titleNoFormatting']) for x in results] ################################################################ # GOOGLE SEARCH -- Custom Search CUSTOM_GOOGLE_SEARCH_URL", "= self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write(self.comment(url)) fout.write(content) def write_error(self, url,", "urllib.quote(before) + urllib.quote(after) if after: answer += '&cp=' + str(len(before)) return answer def", "json.loads(answer) def get_urls_from_google_custom_search(self, keyword): results = self.get_from_google_custom_search(keyword)['items'] return [(x['link'], x.get('title', '')) for x", "urlparse.urlparse(url) if parsed_url.netloc in BLACKLIST: raise WebLoadingError('URL %s in blacklist' % url) #", "= self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write('ERROR\\n') fout.write(error.replace('\\n', ' ') +", "return (self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]) except: # Stale bad cache", "after=''): answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after) if after: answer += '&cp='", "error: return WebLoadingError(error_message) else: return fin.read() def write(self, url, content, already_hashed=False): path =", "if isinstance(result, WebLoadingError): if self.log: print >> sys.stderr, '[ERROR]', result result = None", "results = self.get_from_google_search(keyword)['responseData']['results'] return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] GOOGLE_PAUSE = 30", "me! Exponential backoff! 
print >> sys.stderr, ('Hide from Google for %d seconds ...'", "results] ################################################################ # GOOGLE SEARCH -- Custom Search CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def", "check_html and content_type != 'text/html': raise WebLoadingError(\"Non-HTML response: %s\" % content_type) result =", "7.0; Windows NT 6.0)')] response = opener.open(url, timeout=self.timeout) # Check content type to", "from blacklist import BLACKLIST BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class WebpageCache(object): def __init__(self, basedir=BASEDIR,", "path = self.get_path(url, already_hashed) if os.path.exists(path): with open(path) as fin: error = False", "'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 6.0)')] response = opener.open(url, timeout=self.timeout) # Check", "search return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception): def __init__(self, msg): self.args = (msg,) self.msg", ">> sys.stderr, 'Retrieved \"%s\"' % url else: print >> sys.stderr, ('Loaded \"%s\" from", "Exponential backoff! print >> sys.stderr, ('Hide from Google for %d seconds ...' 
%", "# - url (+ unescapedUrl, visibleUrl, cacheUrl) # - titleNoFormatting (+ title) #", "# Use Custom search return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception): def __init__(self, msg): self.args", "def get_google_suggest_url(self, before, after=''): answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after) if after:", "and not force: self.cache_miss = False if isinstance(result, WebLoadingError): if self.log: print >>", "False self.timeout = timeout def get_hashcode(self, url): return hashlib.sha1(url).hexdigest() def get_path(self, url, already_hashed=False):", "utf-8 -*- import urllib, urllib2, urlparse, socket import json, sys, os, hashlib, subprocess,", "if self.cache_miss: print >> sys.stderr, 'Retrieved \"%s\"' % url else: print >> sys.stderr,", "if self.log: if self.cache_miss: print >> sys.stderr, 'Retrieved \"%s\"' % url else: print", "force=True) def get_urls_from_google_search_with_backoff(self, keyword): results = self.get_from_google_search_with_backoff(keyword) return [(x['unescapedUrl'], x['titleNoFormatting']) for x in", "sys.stderr, '[ERROR] ', e if isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)): self.write_error(url, str(e.message)) result =", "Stale bad cache ... pass # Use Custom search return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class", "if raw: return answer return json.loads(answer) def get_urls_from_google_custom_search(self, keyword): results = self.get_from_google_custom_search(keyword)['items'] return", "check_html=False) while True: try: return json.loads(result)['responseData']['results'] except: # Google nailed me! 
Exponential backoff!", "isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)): self.write_error(url, str(e.message)) result = None if self.log: if self.cache_miss:", "already_hashed=False): if not already_hashed: url = self.get_hashcode(url) return os.path.join(self.cachePath, url) def get_current_datetime(self): return", "self.cache_miss = True try: if self.log: print >> sys.stderr, 'Downloading from', url, '...'", "check_html=False))[1] ################################################################ # GOOGLE SEARCH -- old API # The important fields of", "# The important fields of each result are # - url (+ unescapedUrl,", "class WebpageCache(object): def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15): self.cachePath = os.path.join(basedir, dirname) if", "= json.loads(result)['responseData']['results'] return (self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]) except: # Stale", "+ urllib.quote(after) if after: answer += '&cp=' + str(len(before)) return answer def get_from_google_suggest(self,", "for %d seconds ...' 
% WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *= 2 result = self.get_page(url,", "check_html=False, force=True) def get_urls_from_google_search_with_backoff(self, keyword): results = self.get_from_google_search_with_backoff(keyword) return [(x['unescapedUrl'], x['titleNoFormatting']) for x", "return WebLoadingError(error_message) else: return fin.read() def write(self, url, content, already_hashed=False): path = self.get_path(url,", "get_google_search_url(self, keyword): answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword) return answer def get_from_google_search(self, keyword, raw=False):", "return json.loads(answer) def get_urls_from_google_custom_search(self, keyword): results = self.get_from_google_custom_search(keyword)['items'] return [(x['link'], x.get('title', '')) for", "cacheUrl) # - titleNoFormatting (+ title) # - content GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def", "backoff! print >> sys.stderr, ('Hide from Google for %d seconds ...' % WebpageCache.GOOGLE_PAUSE)", ">> sys.stderr, 'Downloading from', url, '...' 
# Check blacklist parsed_url = urlparse.urlparse(url) if", "content_type != 'text/html': raise WebLoadingError(\"Non-HTML response: %s\" % content_type) result = response.read() self.write(url,", "and content_type != 'text/html': raise WebLoadingError(\"Non-HTML response: %s\" % content_type) result = response.read()", "error = False check_url = fin.readline().strip() if check_url == 'ERROR': error = True", "write_error(self, url, error, already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w') as fout:", "if check_html and content_type != 'text/html': raise WebLoadingError(\"Non-HTML response: %s\" % content_type) result", "answer def get_from_google_custom_search(self, keyword, raw=False): url = self.get_google_custom_search_url(keyword) answer = self.get_page(url, check_html=False) if", "= os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class WebpageCache(object): def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15): self.cachePath =", "url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) if raw: return result return json.loads(result)", "% url else: print >> sys.stderr, ('Loaded \"%s\" from cache (%s)' % (url,", "result = None else: self.cache_miss = True try: if self.log: print >> sys.stderr,", "CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self, api_key, cx): self.api_key = api_key self.cx =", "if parsed_url.netloc in BLACKLIST: raise WebLoadingError('URL %s in blacklist' % url) # Open", "json.loads(result)['responseData']['results'] return (self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]) except: # Stale bad", "'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self, before, after=''): answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after) if", 
"tokens = check_url.split() assert len(tokens) > 2 and tokens[1] == urllib.quote(url), path if", "e if isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)): self.write_error(url, str(e.message)) result = None if self.log:", "def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15): self.cachePath = os.path.join(basedir, dirname) if not os.path.exists(self.cachePath):", "check_html=False) if raw: return result return json.loads(result) def get_urls_from_google_search(self, keyword): results = self.get_from_google_search(keyword)['responseData']['results']", "= 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def get_google_search_url(self, keyword): answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword) return answer def", "self.get_hashcode(url) return os.path.join(self.cachePath, url) def get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def open_in_browser(self, hashcode, browser=\"firefox\"):", "str(e.message)) result = None if self.log: if self.cache_miss: print >> sys.stderr, 'Retrieved \"%s\"'", "raw: return answer return json.loads(answer) def get_urls_from_google_custom_search(self, keyword): results = self.get_from_google_custom_search(keyword)['items'] return [(x['link'],", "fout.write(self.comment(url)) def get_page(self, url, force=False, check_html=True): result = self.read(url) if result and not", "WebLoadingError(error_message) else: return fin.read() def write(self, url, content, already_hashed=False): path = self.get_path(url, already_hashed)", "before, after=''): url = self.get_google_suggest_url(before, after) return json.loads(self.get_page(url, check_html=False))[1] ################################################################ # GOOGLE SEARCH", "return json.loads(result)['responseData']['results'] except: # Google nailed me! Exponential backoff! 
print >> sys.stderr, ('Hide", "time from blacklist import BLACKLIST BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class WebpageCache(object): def __init__(self,", "comment(self, url): return ' '.join(('<!--', urllib.quote(url), self.get_current_datetime(), '-->\\n')) def read(self, url, already_hashed=False): path", "self.log: print >> sys.stderr, 'Downloading from', url, '...' # Check blacklist parsed_url =", "'''Return (cache_path, results)''' old_url = self.get_google_search_url(keyword) result = self.read(old_url) if result and not", "results = self.get_from_google_custom_search(keyword)['items'] return [(x['link'], x.get('title', '')) for x in results] def get_urls_from_google_hybrid_search(self,", "before, after=''): answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after) if after: answer +=", "self.get_page(url, check_html=False) if raw: return result return json.loads(result) def get_urls_from_google_search(self, keyword): results =", "already_hashed) with open(path, 'w') as fout: fout.write(self.comment(url)) fout.write(content) def write_error(self, url, error, already_hashed=False):", "time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *= 2 result = self.get_page(url, check_html=False, force=True) def get_urls_from_google_search_with_backoff(self, keyword): results", "if not already_hashed: url = self.get_hashcode(url) return os.path.join(self.cachePath, url) def get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\",", "response.read() self.write(url, result) except Exception, e: if self.log: print >> sys.stderr, '[ERROR] ',", "################################################################ # GOOGLE SEARCH -- Custom Search CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self,", "fin.readline().strip() if check_url == 'ERROR': error = True error_message = fin.readline().strip() 
check_url =", "get_urls_from_google_custom_search(self, keyword): results = self.get_from_google_custom_search(keyword)['items'] return [(x['link'], x.get('title', '')) for x in results]", "old_url = self.get_google_search_url(keyword) result = self.read(old_url) if result and not isinstance(result, WebLoadingError): #", "url = self.get_google_suggest_url(before, after) return json.loads(self.get_page(url, check_html=False))[1] ################################################################ # GOOGLE SEARCH -- old", "def get_page(self, url, force=False, check_html=True): result = self.read(url) if result and not force:", "results)''' old_url = self.get_google_search_url(keyword) result = self.read(old_url) if result and not isinstance(result, WebLoadingError):", "x in results]) except: # Stale bad cache ... pass # Use Custom", "in blacklist' % url) # Open web page opener = urllib2.build_opener() opener.addheaders =", "os.path.join(basedir, dirname) if not os.path.exists(self.cachePath): os.mkdir(self.cachePath) self.log = log self.cache_miss = False self.timeout", "('User-agent', 'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 6.0)')] response = opener.open(url, timeout=self.timeout) #", "keyword): results = self.get_from_google_search(keyword)['responseData']['results'] return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] GOOGLE_PAUSE =", "= timeout def get_hashcode(self, url): return hashlib.sha1(url).hexdigest() def get_path(self, url, already_hashed=False): if not", "time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def open_in_browser(self, hashcode, browser=\"firefox\"): path = os.path.join(self.cachePath, hashcode) subprocess.call([browser, path]) def", "fout.write(self.comment(url)) fout.write(content) def write_error(self, url, error, already_hashed=False): path = self.get_path(url, already_hashed) with open(path,", "and not isinstance(result, WebLoadingError): # Found result in cache try: results = 
json.loads(result)['responseData']['results']", "basedir=BASEDIR, dirname='web.cache', log=True, timeout=15): self.cachePath = os.path.join(basedir, dirname) if not os.path.exists(self.cachePath): os.mkdir(self.cachePath) self.log", "The important fields of each result are # - url (+ unescapedUrl, visibleUrl,", "isinstance(result, WebLoadingError): if self.log: print >> sys.stderr, '[ERROR]', result result = None else:", "result are # - url (+ unescapedUrl, visibleUrl, cacheUrl) # - titleNoFormatting (+", "if raw: return result return json.loads(result) def get_urls_from_google_search(self, keyword): results = self.get_from_google_search(keyword)['responseData']['results'] return", "result) except Exception, e: if self.log: print >> sys.stderr, '[ERROR] ', e if", "SEARCH -- old API # The important fields of each result are #", "already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write(self.comment(url)) fout.write(content) def", "blacklist' % url) # Open web page opener = urllib2.build_opener() opener.addheaders = [", "urllib, urllib2, urlparse, socket import json, sys, os, hashlib, subprocess, time from blacklist", "for x in results] GOOGLE_PAUSE = 30 def get_from_google_search_with_backoff(self, keyword): url = self.get_google_search_url(keyword)", "result = self.get_page(url, check_html=False) while True: try: return json.loads(result)['responseData']['results'] except: # Google nailed", "WebLoadingError): # Found result in cache try: results = json.loads(result)['responseData']['results'] return (self.get_path(old_url), [(x['unescapedUrl'],", "'\\n') fout.write(self.comment(url)) def get_page(self, url, force=False, check_html=True): result = self.read(url) if result and", "= self.get_page(url, check_html=False) while True: try: return json.loads(result)['responseData']['results'] except: # Google nailed me!", "results] def get_urls_from_google_hybrid_search(self, keyword): '''Return (cache_path, results)''' 
old_url = self.get_google_search_url(keyword) result = self.read(old_url)", "# GOOGLE SEARCH -- Custom Search CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self, api_key,", "%s in blacklist' % url) # Open web page opener = urllib2.build_opener() opener.addheaders", "dirname='web.cache', log=True, timeout=15): self.cachePath = os.path.join(basedir, dirname) if not os.path.exists(self.cachePath): os.mkdir(self.cachePath) self.log =", "= self.GOOGLE_SEARCH_URL + urllib.quote(keyword) return answer def get_from_google_search(self, keyword, raw=False): url = self.get_google_search_url(keyword)", "open_in_browser(self, hashcode, browser=\"firefox\"): path = os.path.join(self.cachePath, hashcode) subprocess.call([browser, path]) def comment(self, url): return", "- titleNoFormatting (+ title) # - content GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def get_google_search_url(self, keyword):", "error, already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write('ERROR\\n') fout.write(error.replace('\\n',", "check_url = fin.readline() if not already_hashed: tokens = check_url.split() assert len(tokens) > 2", "check_url.split() assert len(tokens) > 2 and tokens[1] == urllib.quote(url), path if error: return", "error_message = fin.readline().strip() check_url = fin.readline() if not already_hashed: tokens = check_url.split() assert", "# - content GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def get_google_search_url(self, keyword): answer = self.GOOGLE_SEARCH_URL +", "None else: self.cache_miss = True try: if self.log: print >> sys.stderr, 'Downloading from',", "log=True, timeout=15): self.cachePath = os.path.join(basedir, dirname) if not os.path.exists(self.cachePath): os.mkdir(self.cachePath) self.log = log", 
"'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self, api_key, cx): self.api_key = api_key self.cx = cx def", "else: print >> sys.stderr, ('Loaded \"%s\" from cache (%s)' % (url, self.get_path(url))) return", "len(tokens) > 2 and tokens[1] == urllib.quote(url), path if error: return WebLoadingError(error_message) else:", "= self.get_from_google_search(keyword)['responseData']['results'] return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] GOOGLE_PAUSE = 30 def", "already_hashed: tokens = check_url.split() assert len(tokens) > 2 and tokens[1] == urllib.quote(url), path", "GOOGLE SEARCH -- old API # The important fields of each result are", "return ' '.join(('<!--', urllib.quote(url), self.get_current_datetime(), '-->\\n')) def read(self, url, already_hashed=False): path = self.get_path(url,", "fin.readline() if not already_hashed: tokens = check_url.split() assert len(tokens) > 2 and tokens[1]", "+ urllib.quote(keyword) return answer def get_from_google_search(self, keyword, raw=False): url = self.get_google_search_url(keyword) result =", "+ '\\n') fout.write(self.comment(url)) def get_page(self, url, force=False, check_html=True): result = self.read(url) if result", "web page opener = urllib2.build_opener() opener.addheaders = [ ('User-agent', 'Mozilla/5.0 (compatible; MSIE 7.0;", "class WebLoadingError(Exception): def __init__(self, msg): self.args = (msg,) self.msg = msg self.message =", "self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) while True: try: return json.loads(result)['responseData']['results'] except: # Google", "timeout def get_hashcode(self, url): return hashlib.sha1(url).hexdigest() def get_path(self, url, already_hashed=False): if not already_hashed:", "url, content, already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write(self.comment(url))", "= fin.readline().strip() if 
check_url == 'ERROR': error = True error_message = fin.readline().strip() check_url", "SUGGEST GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self, before, after=''): answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before)", "= self.get_path(url, already_hashed) if os.path.exists(path): with open(path) as fin: error = False check_url", "with open(path, 'w') as fout: fout.write('ERROR\\n') fout.write(error.replace('\\n', ' ') + '\\n') fout.write(self.comment(url)) def", "are # - url (+ unescapedUrl, visibleUrl, cacheUrl) # - titleNoFormatting (+ title)", "def comment(self, url): return ' '.join(('<!--', urllib.quote(url), self.get_current_datetime(), '-->\\n')) def read(self, url, already_hashed=False):", "'ERROR': error = True error_message = fin.readline().strip() check_url = fin.readline() if not already_hashed:", "result and not isinstance(result, WebLoadingError): # Found result in cache try: results =", "open(path) as fin: error = False check_url = fin.readline().strip() if check_url == 'ERROR':", "blacklist parsed_url = urlparse.urlparse(url) if parsed_url.netloc in BLACKLIST: raise WebLoadingError('URL %s in blacklist'", "else: return fin.read() def write(self, url, content, already_hashed=False): path = self.get_path(url, already_hashed) with", "= self.get_page(url, check_html=False) if raw: return answer return json.loads(answer) def get_urls_from_google_custom_search(self, keyword): results", "self.CUSTOM_GOOGLE_SEARCH_URL % \\ (self.api_key, self.cx, urllib.quote(keyword)) return answer def get_from_google_custom_search(self, keyword, raw=False): url", "= self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) if raw: return result return json.loads(result) def", "return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] GOOGLE_PAUSE = 30 def get_from_google_search_with_backoff(self, keyword):", "'w') as fout: fout.write('ERROR\\n') 
fout.write(error.replace('\\n', ' ') + '\\n') fout.write(self.comment(url)) def get_page(self, url,", "'-->\\n')) def read(self, url, already_hashed=False): path = self.get_path(url, already_hashed) if os.path.exists(path): with open(path)", "and tokens[1] == urllib.quote(url), path if error: return WebLoadingError(error_message) else: return fin.read() def", "WebLoadingError('URL %s in blacklist' % url) # Open web page opener = urllib2.build_opener()", "isinstance(result, WebLoadingError): # Found result in cache try: results = json.loads(result)['responseData']['results'] return (self.get_path(old_url),", "x['titleNoFormatting']) for x in results]) except: # Stale bad cache ... pass #", "fields of each result are # - url (+ unescapedUrl, visibleUrl, cacheUrl) #", "while True: try: return json.loads(result)['responseData']['results'] except: # Google nailed me! Exponential backoff! print", "cx def get_google_custom_search_url(self, keyword): answer = self.CUSTOM_GOOGLE_SEARCH_URL % \\ (self.api_key, self.cx, urllib.quote(keyword)) return", "'..'))) class WebpageCache(object): def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15): self.cachePath = os.path.join(basedir, dirname)", "set_google_custom_search_keys(self, api_key, cx): self.api_key = api_key self.cx = cx def get_google_custom_search_url(self, keyword): answer", "urllib2.URLError, socket.error)): self.write_error(url, str(e.message)) result = None if self.log: if self.cache_miss: print >>", "+ urllib.quote(before) + urllib.quote(after) if after: answer += '&cp=' + str(len(before)) return answer", "\\ (self.api_key, self.cx, urllib.quote(keyword)) return answer def get_from_google_custom_search(self, keyword, raw=False): url = self.get_google_custom_search_url(keyword)", "sys.stderr, ('Hide from Google for %d seconds ...' 
% WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *=", "with open(path) as fin: error = False check_url = fin.readline().strip() if check_url ==", "in BLACKLIST: raise WebLoadingError('URL %s in blacklist' % url) # Open web page", "= None if self.log: if self.cache_miss: print >> sys.stderr, 'Retrieved \"%s\"' % url", "(url, self.get_path(url))) return result ################################################################ # GOOGLE SUGGEST GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self,", "get_page(self, url, force=False, check_html=True): result = self.read(url) if result and not force: self.cache_miss", "[(x['unescapedUrl'], x['titleNoFormatting']) for x in results] ################################################################ # GOOGLE SEARCH -- Custom Search", "result = None if self.log: if self.cache_miss: print >> sys.stderr, 'Retrieved \"%s\"' %", "get_google_suggest_url(self, before, after=''): answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after) if after: answer", "= urlparse.urlparse(url) if parsed_url.netloc in BLACKLIST: raise WebLoadingError('URL %s in blacklist' % url)", "= self.get_page(url, check_html=False, force=True) def get_urls_from_google_search_with_backoff(self, keyword): results = self.get_from_google_search_with_backoff(keyword) return [(x['unescapedUrl'], x['titleNoFormatting'])", "check_url = fin.readline().strip() if check_url == 'ERROR': error = True error_message = fin.readline().strip()", "url, force=False, check_html=True): result = self.read(url) if result and not force: self.cache_miss =", "cache ... pass # Use Custom search return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception): def", "try: return json.loads(result)['responseData']['results'] except: # Google nailed me! Exponential backoff! 
print >> sys.stderr,", "pass # Use Custom search return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception): def __init__(self, msg):", "WebpageCache.GOOGLE_PAUSE *= 2 result = self.get_page(url, check_html=False, force=True) def get_urls_from_google_search_with_backoff(self, keyword): results =", "socket import json, sys, os, hashlib, subprocess, time from blacklist import BLACKLIST BASEDIR", "return answer def get_from_google_custom_search(self, keyword, raw=False): url = self.get_google_custom_search_url(keyword) answer = self.get_page(url, check_html=False)", "if self.log: print >> sys.stderr, '[ERROR] ', e if isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)):", "'.join(('<!--', urllib.quote(url), self.get_current_datetime(), '-->\\n')) def read(self, url, already_hashed=False): path = self.get_path(url, already_hashed) if", "except Exception, e: if self.log: print >> sys.stderr, '[ERROR] ', e if isinstance(e,", "coding: utf-8 -*- import urllib, urllib2, urlparse, socket import json, sys, os, hashlib,", "blacklist import BLACKLIST BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class WebpageCache(object): def __init__(self, basedir=BASEDIR, dirname='web.cache',", "[ ('User-agent', 'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 6.0)')] response = opener.open(url, timeout=self.timeout)", "def get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def open_in_browser(self, hashcode, browser=\"firefox\"): path = os.path.join(self.cachePath, hashcode)", "already_hashed) with open(path, 'w') as fout: fout.write('ERROR\\n') fout.write(error.replace('\\n', ' ') + '\\n') fout.write(self.comment(url))", "error = True error_message = fin.readline().strip() check_url = fin.readline() if not already_hashed: tokens", "= os.path.join(basedir, dirname) if not os.path.exists(self.cachePath): os.mkdir(self.cachePath) 
self.log = log self.cache_miss = False", "Search CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self, api_key, cx): self.api_key = api_key self.cx", "== 'ERROR': error = True error_message = fin.readline().strip() check_url = fin.readline() if not", "keyword, raw=False): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) if raw: return result", "(+ unescapedUrl, visibleUrl, cacheUrl) # - titleNoFormatting (+ title) # - content GOOGLE_SEARCH_URL", "keyword): '''Return (cache_path, results)''' old_url = self.get_google_search_url(keyword) result = self.read(old_url) if result and", "self.get_google_suggest_url(before, after) return json.loads(self.get_page(url, check_html=False))[1] ################################################################ # GOOGLE SEARCH -- old API #", "= self.get_google_suggest_url(before, after) return json.loads(self.get_page(url, check_html=False))[1] ################################################################ # GOOGLE SEARCH -- old API", "= True try: if self.log: print >> sys.stderr, 'Downloading from', url, '...' #", "keyword): results = self.get_from_google_custom_search(keyword)['items'] return [(x['link'], x.get('title', '')) for x in results] def", "= False check_url = fin.readline().strip() if check_url == 'ERROR': error = True error_message", "not force: self.cache_miss = False if isinstance(result, WebLoadingError): if self.log: print >> sys.stderr,", "'Downloading from', url, '...' 
# Check blacklist parsed_url = urlparse.urlparse(url) if parsed_url.netloc in", "api_key, cx): self.api_key = api_key self.cx = cx def get_google_custom_search_url(self, keyword): answer =", "result = self.read(url) if result and not force: self.cache_miss = False if isinstance(result,", "check_html=False) if raw: return answer return json.loads(answer) def get_urls_from_google_custom_search(self, keyword): results = self.get_from_google_custom_search(keyword)['items']", "MSIE 7.0; Windows NT 6.0)')] response = opener.open(url, timeout=self.timeout) # Check content type", "= True error_message = fin.readline().strip() check_url = fin.readline() if not already_hashed: tokens =", "= [ ('User-agent', 'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 6.0)')] response = opener.open(url,", "tokens[1] == urllib.quote(url), path if error: return WebLoadingError(error_message) else: return fin.read() def write(self,", "hashlib.sha1(url).hexdigest() def get_path(self, url, already_hashed=False): if not already_hashed: url = self.get_hashcode(url) return os.path.join(self.cachePath,", ">> sys.stderr, '[ERROR] ', e if isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)): self.write_error(url, str(e.message)) result", "if result and not force: self.cache_miss = False if isinstance(result, WebLoadingError): if self.log:", "opener = urllib2.build_opener() opener.addheaders = [ ('User-agent', 'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT", "response: %s\" % content_type) result = response.read() self.write(url, result) except Exception, e: if", "return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception): def __init__(self, msg): self.args = (msg,) self.msg =", "') + '\\n') fout.write(self.comment(url)) def get_page(self, url, force=False, check_html=True): result = self.read(url) if", "title) # - content GOOGLE_SEARCH_URL = 
'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def get_google_search_url(self, keyword): answer = self.GOOGLE_SEARCH_URL", "parsed_url.netloc in BLACKLIST: raise WebLoadingError('URL %s in blacklist' % url) # Open web", "BLACKLIST BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class WebpageCache(object): def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15):", "= cx def get_google_custom_search_url(self, keyword): answer = self.CUSTOM_GOOGLE_SEARCH_URL % \\ (self.api_key, self.cx, urllib.quote(keyword))", "def read(self, url, already_hashed=False): path = self.get_path(url, already_hashed) if os.path.exists(path): with open(path) as", "def get_urls_from_google_hybrid_search(self, keyword): '''Return (cache_path, results)''' old_url = self.get_google_search_url(keyword) result = self.read(old_url) if", "self.api_key = api_key self.cx = cx def get_google_custom_search_url(self, keyword): answer = self.CUSTOM_GOOGLE_SEARCH_URL %", "in results]) except: # Stale bad cache ... 
pass # Use Custom search", "non-HTML content_type = response.info().type if check_html and content_type != 'text/html': raise WebLoadingError(\"Non-HTML response:", "get_from_google_custom_search(self, keyword, raw=False): url = self.get_google_custom_search_url(keyword) answer = self.get_page(url, check_html=False) if raw: return", "#!/usr/bin/env python # -*- coding: utf-8 -*- import urllib, urllib2, urlparse, socket import", "check_url == 'ERROR': error = True error_message = fin.readline().strip() check_url = fin.readline() if", "result = self.get_page(url, check_html=False) if raw: return result return json.loads(result) def get_urls_from_google_search(self, keyword):", "urllib.quote(keyword)) return answer def get_from_google_custom_search(self, keyword, raw=False): url = self.get_google_custom_search_url(keyword) answer = self.get_page(url,", "(WebLoadingError, urllib2.URLError, socket.error)): self.write_error(url, str(e.message)) result = None if self.log: if self.cache_miss: print", "url = self.get_hashcode(url) return os.path.join(self.cachePath, url) def get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def open_in_browser(self,", "WebLoadingError): if self.log: print >> sys.stderr, '[ERROR]', result result = None else: self.cache_miss", "def get_from_google_suggest(self, before, after=''): url = self.get_google_suggest_url(before, after) return json.loads(self.get_page(url, check_html=False))[1] ################################################################ #", "GOOGLE SEARCH -- Custom Search CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self, api_key, cx):", "def get_urls_from_google_search(self, keyword): results = self.get_from_google_search(keyword)['responseData']['results'] return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]", "after: answer += '&cp=' + str(len(before)) return answer def 
get_from_google_suggest(self, before, after=''): url", "self.log = log self.cache_miss = False self.timeout = timeout def get_hashcode(self, url): return", "already_hashed=False): path = self.get_path(url, already_hashed) if os.path.exists(path): with open(path) as fin: error =", "', e if isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)): self.write_error(url, str(e.message)) result = None if", "x in results] ################################################################ # GOOGLE SEARCH -- Custom Search CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\", "self.get_from_google_search(keyword)['responseData']['results'] return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] GOOGLE_PAUSE = 30 def get_from_google_search_with_backoff(self,", "% WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *= 2 result = self.get_page(url, check_html=False, force=True) def get_urls_from_google_search_with_backoff(self,", "answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after) if after: answer += '&cp=' +", "= fin.readline().strip() check_url = fin.readline() if not already_hashed: tokens = check_url.split() assert len(tokens)", "= response.info().type if check_html and content_type != 'text/html': raise WebLoadingError(\"Non-HTML response: %s\" %", "WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *= 2 result = self.get_page(url, check_html=False, force=True) def get_urls_from_google_search_with_backoff(self, keyword):", "in cache try: results = json.loads(result)['responseData']['results'] return (self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting']) for x in", "return hashlib.sha1(url).hexdigest() def get_path(self, url, already_hashed=False): if not already_hashed: url = self.get_hashcode(url) return", "open(path, 'w') as fout: fout.write('ERROR\\n') fout.write(error.replace('\\n', ' ') + '\\n') 
fout.write(self.comment(url)) def get_page(self,", "e: if self.log: print >> sys.stderr, '[ERROR] ', e if isinstance(e, (WebLoadingError, urllib2.URLError,", "keyword): results = self.get_from_google_search_with_backoff(keyword) return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] ################################################################ #", "' '.join(('<!--', urllib.quote(url), self.get_current_datetime(), '-->\\n')) def read(self, url, already_hashed=False): path = self.get_path(url, already_hashed)", "def get_from_google_custom_search(self, keyword, raw=False): url = self.get_google_custom_search_url(keyword) answer = self.get_page(url, check_html=False) if raw:", "page opener = urllib2.build_opener() opener.addheaders = [ ('User-agent', 'Mozilla/5.0 (compatible; MSIE 7.0; Windows", "GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self, before, after=''): answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before) +", "if result and not isinstance(result, WebLoadingError): # Found result in cache try: results", "content, already_hashed=False): path = self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write(self.comment(url)) fout.write(content)", "cache try: results = json.loads(result)['responseData']['results'] return (self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting']) for x in results])", "cx): self.api_key = api_key self.cx = cx def get_google_custom_search_url(self, keyword): answer = self.CUSTOM_GOOGLE_SEARCH_URL", "# Google nailed me! Exponential backoff! print >> sys.stderr, ('Hide from Google for", "Custom search return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception): def __init__(self, msg): self.args = (msg,)", "True try: if self.log: print >> sys.stderr, 'Downloading from', url, '...' 
# Check", "= self.read(url) if result and not force: self.cache_miss = False if isinstance(result, WebLoadingError):", "import urllib, urllib2, urlparse, socket import json, sys, os, hashlib, subprocess, time from", "= opener.open(url, timeout=self.timeout) # Check content type to prevent non-HTML content_type = response.info().type", "os.path.join(self.cachePath, hashcode) subprocess.call([browser, path]) def comment(self, url): return ' '.join(('<!--', urllib.quote(url), self.get_current_datetime(), '-->\\n'))", "check_html=True): result = self.read(url) if result and not force: self.cache_miss = False if", "from', url, '...' # Check blacklist parsed_url = urlparse.urlparse(url) if parsed_url.netloc in BLACKLIST:", "Windows NT 6.0)')] response = opener.open(url, timeout=self.timeout) # Check content type to prevent", "self.log: if self.cache_miss: print >> sys.stderr, 'Retrieved \"%s\"' % url else: print >>", "self.get_page(url, check_html=False, force=True) def get_urls_from_google_search_with_backoff(self, keyword): results = self.get_from_google_search_with_backoff(keyword) return [(x['unescapedUrl'], x['titleNoFormatting']) for", "Found result in cache try: results = json.loads(result)['responseData']['results'] return (self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting']) for", "import BLACKLIST BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class WebpageCache(object): def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True,", "get_path(self, url, already_hashed=False): if not already_hashed: url = self.get_hashcode(url) return os.path.join(self.cachePath, url) def", "log self.cache_miss = False self.timeout = timeout def get_hashcode(self, url): return hashlib.sha1(url).hexdigest() def", "already_hashed) if os.path.exists(path): with open(path) as fin: error = False check_url = fin.readline().strip()", "urllib.quote(after) if after: answer += '&cp=' + str(len(before)) return answer def 
get_from_google_suggest(self, before,", "get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def open_in_browser(self, hashcode, browser=\"firefox\"): path = os.path.join(self.cachePath, hashcode) subprocess.call([browser,", "# - titleNoFormatting (+ title) # - content GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def get_google_search_url(self,", "os, hashlib, subprocess, time from blacklist import BLACKLIST BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class", "return answer def get_from_google_suggest(self, before, after=''): url = self.get_google_suggest_url(before, after) return json.loads(self.get_page(url, check_html=False))[1]", "time.gmtime()) def open_in_browser(self, hashcode, browser=\"firefox\"): path = os.path.join(self.cachePath, hashcode) subprocess.call([browser, path]) def comment(self,", "= self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) while True: try: return json.loads(result)['responseData']['results'] except: #", "url = self.get_google_custom_search_url(keyword) answer = self.get_page(url, check_html=False) if raw: return answer return json.loads(answer)", "result in cache try: results = json.loads(result)['responseData']['results'] return (self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting']) for x", "python # -*- coding: utf-8 -*- import urllib, urllib2, urlparse, socket import json,", "% url) # Open web page opener = urllib2.build_opener() opener.addheaders = [ ('User-agent',", "print >> sys.stderr, '[ERROR] ', e if isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)): self.write_error(url, str(e.message))", "os.path.exists(path): with open(path) as fin: error = False check_url = fin.readline().strip() if check_url", "> 2 and tokens[1] == urllib.quote(url), path if error: return WebLoadingError(error_message) else: return", "print >> sys.stderr, ('Loaded \"%s\" from 
cache (%s)' % (url, self.get_path(url))) return result", "for x in results] ################################################################ # GOOGLE SEARCH -- Custom Search CUSTOM_GOOGLE_SEARCH_URL =", "'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def get_google_search_url(self, keyword): answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword) return answer def get_from_google_search(self,", "url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) while True: try: return json.loads(result)['responseData']['results'] except:", "GOOGLE SUGGEST GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self, before, after=''): answer = self.GOOGLE_SUGGEST_URL +", "fout.write('ERROR\\n') fout.write(error.replace('\\n', ' ') + '\\n') fout.write(self.comment(url)) def get_page(self, url, force=False, check_html=True): result", "urllib.quote(keyword) return answer def get_from_google_search(self, keyword, raw=False): url = self.get_google_search_url(keyword) result = self.get_page(url,", "return [(x['link'], x.get('title', '')) for x in results] def get_urls_from_google_hybrid_search(self, keyword): '''Return (cache_path,", "content GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q=' def get_google_search_url(self, keyword): answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword) return", "%d seconds ...' % WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE *= 2 result = self.get_page(url, check_html=False,", "self.get_path(url, already_hashed) with open(path, 'w') as fout: fout.write('ERROR\\n') fout.write(error.replace('\\n', ' ') + '\\n')", "Google nailed me! Exponential backoff! 
print >> sys.stderr, ('Hide from Google for %d", "= api_key self.cx = cx def get_google_custom_search_url(self, keyword): answer = self.CUSTOM_GOOGLE_SEARCH_URL % \\", "json.loads(self.get_page(url, check_html=False))[1] ################################################################ # GOOGLE SEARCH -- old API # The important fields", "# Check content type to prevent non-HTML content_type = response.info().type if check_html and", "def get_path(self, url, already_hashed=False): if not already_hashed: url = self.get_hashcode(url) return os.path.join(self.cachePath, url)", "important fields of each result are # - url (+ unescapedUrl, visibleUrl, cacheUrl)", "= None else: self.cache_miss = True try: if self.log: print >> sys.stderr, 'Downloading", ">> sys.stderr, ('Hide from Google for %d seconds ...' % WebpageCache.GOOGLE_PAUSE) time.sleep(WebpageCache.GOOGLE_PAUSE) WebpageCache.GOOGLE_PAUSE", "\"%s\" from cache (%s)' % (url, self.get_path(url))) return result ################################################################ # GOOGLE SUGGEST", "% content_type) result = response.read() self.write(url, result) except Exception, e: if self.log: print", "url): return ' '.join(('<!--', urllib.quote(url), self.get_current_datetime(), '-->\\n')) def read(self, url, already_hashed=False): path =", "sys.stderr, 'Retrieved \"%s\"' % url else: print >> sys.stderr, ('Loaded \"%s\" from cache", "in results] GOOGLE_PAUSE = 30 def get_from_google_search_with_backoff(self, keyword): url = self.get_google_search_url(keyword) result =", ">> sys.stderr, '[ERROR]', result result = None else: self.cache_miss = True try: if", "answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword) return answer def get_from_google_search(self, keyword, raw=False): url =", ">> sys.stderr, ('Loaded \"%s\" from cache (%s)' % (url, self.get_path(url))) return result ################################################################", "= self.get_from_google_search_with_backoff(keyword) return 
[(x['unescapedUrl'], x['titleNoFormatting']) for x in results] ################################################################ # GOOGLE SEARCH", "= self.get_from_google_custom_search(keyword)['items'] return [(x['link'], x.get('title', '')) for x in results] def get_urls_from_google_hybrid_search(self, keyword):", "os.path.join(self.cachePath, url) def get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def open_in_browser(self, hashcode, browser=\"firefox\"): path =", "timeout=15): self.cachePath = os.path.join(basedir, dirname) if not os.path.exists(self.cachePath): os.mkdir(self.cachePath) self.log = log self.cache_miss", "= response.read() self.write(url, result) except Exception, e: if self.log: print >> sys.stderr, '[ERROR]", "self.cx = cx def get_google_custom_search_url(self, keyword): answer = self.CUSTOM_GOOGLE_SEARCH_URL % \\ (self.api_key, self.cx,", "= self.get_google_custom_search_url(keyword) answer = self.get_page(url, check_html=False) if raw: return answer return json.loads(answer) def", "not isinstance(result, WebLoadingError): # Found result in cache try: results = json.loads(result)['responseData']['results'] return", "WebpageCache(object): def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15): self.cachePath = os.path.join(basedir, dirname) if not", "__init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15): self.cachePath = os.path.join(basedir, dirname) if not os.path.exists(self.cachePath): os.mkdir(self.cachePath)", "print >> sys.stderr, 'Retrieved \"%s\"' % url else: print >> sys.stderr, ('Loaded \"%s\"", "self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after) if after: answer += '&cp=' + str(len(before)) return", "path if error: return WebLoadingError(error_message) else: return fin.read() def write(self, url, content, already_hashed=False):", "path = os.path.join(self.cachePath, hashcode) subprocess.call([browser, path]) def comment(self, url): 
return ' '.join(('<!--', urllib.quote(url),", "assert len(tokens) > 2 and tokens[1] == urllib.quote(url), path if error: return WebLoadingError(error_message)", "('Loaded \"%s\" from cache (%s)' % (url, self.get_path(url))) return result ################################################################ # GOOGLE", "[(x['unescapedUrl'], x['titleNoFormatting']) for x in results] GOOGLE_PAUSE = 30 def get_from_google_search_with_backoff(self, keyword): url", "get_urls_from_google_search(self, keyword): results = self.get_from_google_search(keyword)['responseData']['results'] return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results] GOOGLE_PAUSE", "'[ERROR]', result result = None else: self.cache_miss = True try: if self.log: print", "results = json.loads(result)['responseData']['results'] return (self.get_path(old_url), [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]) except: #", "with open(path, 'w') as fout: fout.write(self.comment(url)) fout.write(content) def write_error(self, url, error, already_hashed=False): path", "try: if self.log: print >> sys.stderr, 'Downloading from', url, '...' 
# Check blacklist", "def get_google_search_url(self, keyword): answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword) return answer def get_from_google_search(self, keyword,", "after=''): url = self.get_google_suggest_url(before, after) return json.loads(self.get_page(url, check_html=False))[1] ################################################################ # GOOGLE SEARCH --", "answer def get_from_google_search(self, keyword, raw=False): url = self.get_google_search_url(keyword) result = self.get_page(url, check_html=False) if", "BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..'))) class WebpageCache(object): def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15): self.cachePath", "already_hashed: url = self.get_hashcode(url) return os.path.join(self.cachePath, url) def get_current_datetime(self): return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.gmtime()) def", "bad cache ... pass # Use Custom search return (self.get_path(self.get_google_custom_search_url(keyword)), self.get_urls_from_google_custom_search(keyword)) class WebLoadingError(Exception):", "# Open web page opener = urllib2.build_opener() opener.addheaders = [ ('User-agent', 'Mozilla/5.0 (compatible;", "return result ################################################################ # GOOGLE SUGGEST GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q=' def get_google_suggest_url(self, before, after=''):", "NT 6.0)')] response = opener.open(url, timeout=self.timeout) # Check content type to prevent non-HTML", "get_from_google_suggest(self, before, after=''): url = self.get_google_suggest_url(before, after) return json.loads(self.get_page(url, check_html=False))[1] ################################################################ # GOOGLE", "def open_in_browser(self, hashcode, browser=\"firefox\"): path = os.path.join(self.cachePath, hashcode) subprocess.call([browser, path]) def comment(self, url):", "= False self.timeout = 
timeout def get_hashcode(self, url): return hashlib.sha1(url).hexdigest() def get_path(self, url,", "-- Custom Search CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\\ 'v1?key=%s&cx=%s&alt=json&safe=high&q=%s' def set_google_custom_search_keys(self, api_key, cx): self.api_key =", "fout.write(error.replace('\\n', ' ') + '\\n') fout.write(self.comment(url)) def get_page(self, url, force=False, check_html=True): result =", "of each result are # - url (+ unescapedUrl, visibleUrl, cacheUrl) # -", "<gh_stars>100-1000 #!/usr/bin/env python # -*- coding: utf-8 -*- import urllib, urllib2, urlparse, socket" ]
[ "b64decode from binascii import hexlify, unhexlify from random import randrange from __main__ import", "hexlify(b64) encoded = list(hexlify(hex)) for x in range(len(encoded)): alpha = int(encoded[x]) + 2", "= b64encode(c_bz2(data)) return c_zlib(hexlify(bz2)) def decompress(self, compressed): unhexed = unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed)) def", "from binascii import hexlify, unhexlify from random import randrange from __main__ import __dict__", "__dict__ as __main__ from bz2 import compress as c_bz2 from bz2 import decompress", "from bz2 import compress as c_bz2 from bz2 import decompress as d_bz2 from", "int(encoded[x]) + 2 encoded[x] = chr(alpha) return ''.join(encoded) def decode(self, encoded): encoded =", "= sha1('TL-Cookies').digest() def makeIV(self): iv = '' for i in range(4): iv +=", "encrypted[4:] key = self.KEY + iv data = self.rc4(data, key) if not data.startswith(self.MAGIC):", "iv def rc4(self, data, key): j = 0 s = range(256) for i", "= self.MAGIC + encoded iv = self.makeIV() key = self.KEY + iv return", "+ 1) % 256 i = (i + s[j]) % 256 s[j], s[i]", "c_bz2 from bz2 import decompress as d_bz2 from zlib import compress as c_zlib", "len(encrypted) < 4: return None iv = encrypted[:4] data = encrypted[4:] key =", "self.MAGIC + encoded iv = self.makeIV() key = self.KEY + iv return iv", "decrypt(self, encrypted): if len(encrypted) < 4: return None iv = encrypted[:4] data =", "decode(self, encoded): encoded = list(encoded) for x in range(len(encoded)): alpha = str(encoded[x]) encoded[x]", "by: Cody/Fd Green Cat Fd (January 31st, 2013) #### # Description: # #", "unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed)) def encrypt(self, data): compressed = self.compress(data) encoded = self.encode(compressed) data", "encoded[x] = str(ord(alpha) - 2) encoded = unhexlify(''.join(encoded)) unhexed = unhexlify(encoded) return b64decode(unhexed)", "= s[i], s[j] results.append(chr(ord(c) ^ s[(s[j] + s[i]) % 256])) return 
''.join(results) def", "= self.rc4(data, key) if not data.startswith(self.MAGIC): return None decoded = self.decode(data[len(self.MAGIC):]) return self.decompress(decoded)", "THE TOON LAND PROJECT ########################## # Filename: HackerCrypt.py # Created by: Cody/Fd Green", "= self.KEY + iv data = self.rc4(data, key) if not data.startswith(self.MAGIC): return None", "return b64decode(unhexed) def compress(self, data): bz2 = b64encode(c_bz2(data)) return c_zlib(hexlify(bz2)) def decompress(self, compressed):", "d_bz2(b64decode(unhexed)) def encrypt(self, data): compressed = self.compress(data) encoded = self.encode(compressed) data = self.MAGIC", "import b64encode, b64decode from binascii import hexlify, unhexlify from random import randrange from", "j = (j + 1) % 256 i = (i + s[j]) %", "to meet Toon Land's coding standards. #### from base64 import b64encode, b64decode from", "range(len(encoded)): alpha = str(encoded[x]) encoded[x] = str(ord(alpha) - 2) encoded = unhexlify(''.join(encoded)) unhexed", "256 s[i], s[j] = s[j], s[i] j = i = 0 results =", "''.join(results) def encode(self, data): b64 = b64encode(data) hex = hexlify(b64) encoded = list(hexlify(hex))", "+ s[j]) % 256 s[j], s[i] = s[i], s[j] results.append(chr(ord(c) ^ s[(s[j] +", "s[j] results.append(chr(ord(c) ^ s[(s[j] + s[i]) % 256])) return ''.join(results) def encode(self, data):", "decompress(self, compressed): unhexed = unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed)) def encrypt(self, data): compressed = self.compress(data)", "from bz2 import decompress as d_bz2 from zlib import compress as c_zlib from", "% 256])) return ''.join(results) def encode(self, data): b64 = b64encode(data) hex = hexlify(b64)", "s[j], s[i] j = i = 0 results = [] for c in", "None iv = encrypted[:4] data = encrypted[4:] key = self.KEY + iv data", "= self.compress(data) encoded = self.encode(compressed) data = self.MAGIC + encoded iv = self.makeIV()", "= int(encoded[x]) + 2 encoded[x] = chr(alpha) return 
''.join(encoded) def decode(self, encoded): encoded", "# The script has been modified to meet Toon Land's coding standards. ####", "% len(key)])) % 256 s[i], s[j] = s[j], s[i] j = i =", "encoded[x] = chr(alpha) return ''.join(encoded) def decode(self, encoded): encoded = list(encoded) for x", "c in data: j = (j + 1) % 256 i = (i", "for c in data: j = (j + 1) % 256 i =", "iv = self.makeIV() key = self.KEY + iv return iv + self.rc4(data, key)", "zlib import decompress as d_zlib from sha import sha as sha1 class HackerCrypt:", "d_bz2 from zlib import compress as c_zlib from zlib import decompress as d_zlib", "import hexlify, unhexlify from random import randrange from __main__ import __dict__ as __main__", "31st, 2013) #### # Description: # # Encryption method written by Team FD", "c_zlib from zlib import decompress as d_zlib from sha import sha as sha1", "s[(s[j] + s[i]) % 256])) return ''.join(results) def encode(self, data): b64 = b64encode(data)", "decompress as d_zlib from sha import sha as sha1 class HackerCrypt: __version__ =", "= unhexlify(encoded) return b64decode(unhexed) def compress(self, data): bz2 = b64encode(c_bz2(data)) return c_zlib(hexlify(bz2)) def", "= self.KEY + iv return iv + self.rc4(data, key) def decrypt(self, encrypted): if", "# Encryption method written by Team FD in 2011 for their personal releases.", "= list(hexlify(hex)) for x in range(len(encoded)): alpha = int(encoded[x]) + 2 encoded[x] =", "s[i]) % 256])) return ''.join(results) def encode(self, data): b64 = b64encode(data) hex =", "rc4(self, data, key): j = 0 s = range(256) for i in range(256):", "class HackerCrypt: __version__ = 'v1.2.0.2' def __init__(self): self.MAGIC = sha1('[TL]').digest() self.KEY = sha1('TL-Cookies').digest()", "list(encoded) for x in range(len(encoded)): alpha = str(encoded[x]) encoded[x] = str(ord(alpha) - 2)", "0 s = range(256) for i in range(256): j = (j + s[i]", "self.compress(data) encoded = self.encode(compressed) data = self.MAGIC + encoded iv = 
self.makeIV() key", "data, key): j = 0 s = range(256) for i in range(256): j", "self.MAGIC = sha1('[TL]').digest() self.KEY = sha1('TL-Cookies').digest() def makeIV(self): iv = '' for i", "str(encoded[x]) encoded[x] = str(ord(alpha) - 2) encoded = unhexlify(''.join(encoded)) unhexed = unhexlify(encoded) return", "as c_zlib from zlib import decompress as d_zlib from sha import sha as", "return iv def rc4(self, data, key): j = 0 s = range(256) for", "# Filename: HackerCrypt.py # Created by: Cody/Fd Green Cat Fd (January 31st, 2013)", "makeIV(self): iv = '' for i in range(4): iv += chr(randrange(256)) return iv", "randrange from __main__ import __dict__ as __main__ from bz2 import compress as c_bz2", "__init__(self): self.MAGIC = sha1('[TL]').digest() self.KEY = sha1('TL-Cookies').digest() def makeIV(self): iv = '' for", "data = encrypted[4:] key = self.KEY + iv data = self.rc4(data, key) if", "import compress as c_zlib from zlib import decompress as d_zlib from sha import", "str(ord(alpha) - 2) encoded = unhexlify(''.join(encoded)) unhexed = unhexlify(encoded) return b64decode(unhexed) def compress(self,", "Description: # # Encryption method written by Team FD in 2011 for their", "iv + self.rc4(data, key) def decrypt(self, encrypted): if len(encrypted) < 4: return None", "2013) #### # Description: # # Encryption method written by Team FD in", "+= chr(randrange(256)) return iv def rc4(self, data, key): j = 0 s =", "+ ord(key[i % len(key)])) % 256 s[i], s[j] = s[j], s[i] j =", "import sha as sha1 class HackerCrypt: __version__ = 'v1.2.0.2' def __init__(self): self.MAGIC =", "if len(encrypted) < 4: return None iv = encrypted[:4] data = encrypted[4:] key", "HackerCrypt: __version__ = 'v1.2.0.2' def __init__(self): self.MAGIC = sha1('[TL]').digest() self.KEY = sha1('TL-Cookies').digest() def", "= 'v1.2.0.2' def __init__(self): self.MAGIC = sha1('[TL]').digest() self.KEY = sha1('TL-Cookies').digest() def makeIV(self): iv", "def decode(self, encoded): encoded = 
list(encoded) for x in range(len(encoded)): alpha = str(encoded[x])", "= str(ord(alpha) - 2) encoded = unhexlify(''.join(encoded)) unhexed = unhexlify(encoded) return b64decode(unhexed) def", "Green Cat Fd (January 31st, 2013) #### # Description: # # Encryption method", "#### from base64 import b64encode, b64decode from binascii import hexlify, unhexlify from random", "from random import randrange from __main__ import __dict__ as __main__ from bz2 import", "Toon Land's coding standards. #### from base64 import b64encode, b64decode from binascii import", "256])) return ''.join(results) def encode(self, data): b64 = b64encode(data) hex = hexlify(b64) encoded", "s[j] = s[j], s[i] j = i = 0 results = [] for", "def encrypt(self, data): compressed = self.compress(data) encoded = self.encode(compressed) data = self.MAGIC +", "HackerCrypt.py # Created by: Cody/Fd Green Cat Fd (January 31st, 2013) #### #", "in 2011 for their personal releases. # The script has been modified to", "as d_zlib from sha import sha as sha1 class HackerCrypt: __version__ = 'v1.2.0.2'", "s[j]) % 256 s[j], s[i] = s[i], s[j] results.append(chr(ord(c) ^ s[(s[j] + s[i])", "= 0 s = range(256) for i in range(256): j = (j +", "unhexlify(encoded) return b64decode(unhexed) def compress(self, data): bz2 = b64encode(c_bz2(data)) return c_zlib(hexlify(bz2)) def decompress(self,", "- 2) encoded = unhexlify(''.join(encoded)) unhexed = unhexlify(encoded) return b64decode(unhexed) def compress(self, data):", "b64encode(c_bz2(data)) return c_zlib(hexlify(bz2)) def decompress(self, compressed): unhexed = unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed)) def encrypt(self,", "__main__ import __dict__ as __main__ from bz2 import compress as c_bz2 from bz2", "compressed): unhexed = unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed)) def encrypt(self, data): compressed = self.compress(data) encoded", "sha1 class HackerCrypt: __version__ = 'v1.2.0.2' def __init__(self): self.MAGIC = 
sha1('[TL]').digest() self.KEY =", "FD in 2011 for their personal releases. # The script has been modified", "base64 import b64encode, b64decode from binascii import hexlify, unhexlify from random import randrange", "data): compressed = self.compress(data) encoded = self.encode(compressed) data = self.MAGIC + encoded iv", "self.KEY + iv data = self.rc4(data, key) if not data.startswith(self.MAGIC): return None decoded", "''.join(encoded) def decode(self, encoded): encoded = list(encoded) for x in range(len(encoded)): alpha =", "bz2 import decompress as d_bz2 from zlib import compress as c_zlib from zlib", "def makeIV(self): iv = '' for i in range(4): iv += chr(randrange(256)) return", "return d_bz2(b64decode(unhexed)) def encrypt(self, data): compressed = self.compress(data) encoded = self.encode(compressed) data =", "= list(encoded) for x in range(len(encoded)): alpha = str(encoded[x]) encoded[x] = str(ord(alpha) -", "s[i] + ord(key[i % len(key)])) % 256 s[i], s[j] = s[j], s[i] j", "'v1.2.0.2' def __init__(self): self.MAGIC = sha1('[TL]').digest() self.KEY = sha1('TL-Cookies').digest() def makeIV(self): iv =", "= self.encode(compressed) data = self.MAGIC + encoded iv = self.makeIV() key = self.KEY", "return ''.join(results) def encode(self, data): b64 = b64encode(data) hex = hexlify(b64) encoded =", "iv return iv + self.rc4(data, key) def decrypt(self, encrypted): if len(encrypted) < 4:", "encoded = self.encode(compressed) data = self.MAGIC + encoded iv = self.makeIV() key =", "+ self.rc4(data, key) def decrypt(self, encrypted): if len(encrypted) < 4: return None iv", "PROJECT ########################## # Filename: HackerCrypt.py # Created by: Cody/Fd Green Cat Fd (January", "from zlib import decompress as d_zlib from sha import sha as sha1 class", "s[j], s[i] = s[i], s[j] results.append(chr(ord(c) ^ s[(s[j] + s[i]) % 256])) return", "i in range(4): iv += chr(randrange(256)) return iv def rc4(self, data, key): j", "personal releases. 
# The script has been modified to meet Toon Land's coding", "j = (j + s[i] + ord(key[i % len(key)])) % 256 s[i], s[j]", "self.encode(compressed) data = self.MAGIC + encoded iv = self.makeIV() key = self.KEY +", "self.makeIV() key = self.KEY + iv return iv + self.rc4(data, key) def decrypt(self,", "binascii import hexlify, unhexlify from random import randrange from __main__ import __dict__ as", "for i in range(256): j = (j + s[i] + ord(key[i % len(key)]))", "for x in range(len(encoded)): alpha = int(encoded[x]) + 2 encoded[x] = chr(alpha) return", "d_zlib from sha import sha as sha1 class HackerCrypt: __version__ = 'v1.2.0.2' def", "encode(self, data): b64 = b64encode(data) hex = hexlify(b64) encoded = list(hexlify(hex)) for x", "b64encode(data) hex = hexlify(b64) encoded = list(hexlify(hex)) for x in range(len(encoded)): alpha =", "list(hexlify(hex)) for x in range(len(encoded)): alpha = int(encoded[x]) + 2 encoded[x] = chr(alpha)", "sha as sha1 class HackerCrypt: __version__ = 'v1.2.0.2' def __init__(self): self.MAGIC = sha1('[TL]').digest()", "256 s[j], s[i] = s[i], s[j] results.append(chr(ord(c) ^ s[(s[j] + s[i]) % 256]))", "encrypted): if len(encrypted) < 4: return None iv = encrypted[:4] data = encrypted[4:]", "(i + s[j]) % 256 s[j], s[i] = s[i], s[j] results.append(chr(ord(c) ^ s[(s[j]", "releases. 
# The script has been modified to meet Toon Land's coding standards.", "in range(len(encoded)): alpha = int(encoded[x]) + 2 encoded[x] = chr(alpha) return ''.join(encoded) def", "from sha import sha as sha1 class HackerCrypt: __version__ = 'v1.2.0.2' def __init__(self):", "self.KEY + iv return iv + self.rc4(data, key) def decrypt(self, encrypted): if len(encrypted)", "TOON LAND PROJECT ########################## # Filename: HackerCrypt.py # Created by: Cody/Fd Green Cat", "Fd (January 31st, 2013) #### # Description: # # Encryption method written by", "# # Encryption method written by Team FD in 2011 for their personal", "Cody/Fd Green Cat Fd (January 31st, 2013) #### # Description: # # Encryption", "def decrypt(self, encrypted): if len(encrypted) < 4: return None iv = encrypted[:4] data", "return iv + self.rc4(data, key) def decrypt(self, encrypted): if len(encrypted) < 4: return", "= hexlify(b64) encoded = list(hexlify(hex)) for x in range(len(encoded)): alpha = int(encoded[x]) +", "= self.makeIV() key = self.KEY + iv return iv + self.rc4(data, key) def", "#### # Description: # # Encryption method written by Team FD in 2011", "% 256 s[i], s[j] = s[j], s[i] j = i = 0 results", "bz2 import compress as c_bz2 from bz2 import decompress as d_bz2 from zlib", "for i in range(4): iv += chr(randrange(256)) return iv def rc4(self, data, key):", "+ s[i] + ord(key[i % len(key)])) % 256 s[i], s[j] = s[j], s[i]", "j = i = 0 results = [] for c in data: j", "import compress as c_bz2 from bz2 import decompress as d_bz2 from zlib import", "1) % 256 i = (i + s[j]) % 256 s[j], s[i] =", "^ s[(s[j] + s[i]) % 256])) return ''.join(results) def encode(self, data): b64 =", "unhexed = unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed)) def encrypt(self, data): compressed = self.compress(data) encoded =", "def compress(self, data): bz2 = b64encode(c_bz2(data)) return c_zlib(hexlify(bz2)) def decompress(self, compressed): unhexed =", "compress as c_bz2 from bz2 import decompress 
as d_bz2 from zlib import compress", "'' for i in range(4): iv += chr(randrange(256)) return iv def rc4(self, data,", "data: j = (j + 1) % 256 i = (i + s[j])", "key): j = 0 s = range(256) for i in range(256): j =", "= '' for i in range(4): iv += chr(randrange(256)) return iv def rc4(self,", "encrypt(self, data): compressed = self.compress(data) encoded = self.encode(compressed) data = self.MAGIC + encoded", "j = 0 s = range(256) for i in range(256): j = (j", "standards. #### from base64 import b64encode, b64decode from binascii import hexlify, unhexlify from", "coding standards. #### from base64 import b64encode, b64decode from binascii import hexlify, unhexlify", "s = range(256) for i in range(256): j = (j + s[i] +", "import __dict__ as __main__ from bz2 import compress as c_bz2 from bz2 import", "i = 0 results = [] for c in data: j = (j", "= [] for c in data: j = (j + 1) % 256", "modified to meet Toon Land's coding standards. #### from base64 import b64encode, b64decode", "encoded iv = self.makeIV() key = self.KEY + iv return iv + self.rc4(data,", "return None iv = encrypted[:4] data = encrypted[4:] key = self.KEY + iv", "decompress as d_bz2 from zlib import compress as c_zlib from zlib import decompress", "b64 = b64encode(data) hex = hexlify(b64) encoded = list(hexlify(hex)) for x in range(len(encoded)):", "chr(alpha) return ''.join(encoded) def decode(self, encoded): encoded = list(encoded) for x in range(len(encoded)):", "+ iv return iv + self.rc4(data, key) def decrypt(self, encrypted): if len(encrypted) <", "return ''.join(encoded) def decode(self, encoded): encoded = list(encoded) for x in range(len(encoded)): alpha", "= encrypted[:4] data = encrypted[4:] key = self.KEY + iv data = self.rc4(data,", "in range(256): j = (j + s[i] + ord(key[i % len(key)])) % 256", "(j + 1) % 256 i = (i + s[j]) % 256 s[j],", "s[i] = s[i], s[j] results.append(chr(ord(c) ^ s[(s[j] + s[i]) % 256])) return ''.join(results)", "for x in range(len(encoded)): alpha = str(encoded[x]) 
encoded[x] = str(ord(alpha) - 2) encoded", "= s[j], s[i] j = i = 0 results = [] for c", "Encryption method written by Team FD in 2011 for their personal releases. #", "from base64 import b64encode, b64decode from binascii import hexlify, unhexlify from random import", "def __init__(self): self.MAGIC = sha1('[TL]').digest() self.KEY = sha1('TL-Cookies').digest() def makeIV(self): iv = ''", "= i = 0 results = [] for c in data: j =", "b64decode(unhexed) def compress(self, data): bz2 = b64encode(c_bz2(data)) return c_zlib(hexlify(bz2)) def decompress(self, compressed): unhexed", "c_zlib(hexlify(bz2)) def decompress(self, compressed): unhexed = unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed)) def encrypt(self, data): compressed", "x in range(len(encoded)): alpha = int(encoded[x]) + 2 encoded[x] = chr(alpha) return ''.join(encoded)", "unhexed = unhexlify(encoded) return b64decode(unhexed) def compress(self, data): bz2 = b64encode(c_bz2(data)) return c_zlib(hexlify(bz2))", "results = [] for c in data: j = (j + 1) %", "their personal releases. 
# The script has been modified to meet Toon Land's", "(January 31st, 2013) #### # Description: # # Encryption method written by Team", "data): bz2 = b64encode(c_bz2(data)) return c_zlib(hexlify(bz2)) def decompress(self, compressed): unhexed = unhexlify(d_zlib(compressed)) return", "= b64encode(data) hex = hexlify(b64) encoded = list(hexlify(hex)) for x in range(len(encoded)): alpha", "########################## THE TOON LAND PROJECT ########################## # Filename: HackerCrypt.py # Created by: Cody/Fd", "def decompress(self, compressed): unhexed = unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed)) def encrypt(self, data): compressed =", "s[i], s[j] = s[j], s[i] j = i = 0 results = []", "return c_zlib(hexlify(bz2)) def decompress(self, compressed): unhexed = unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed)) def encrypt(self, data):", "self.KEY = sha1('TL-Cookies').digest() def makeIV(self): iv = '' for i in range(4): iv", "= encrypted[4:] key = self.KEY + iv data = self.rc4(data, key) if not", "= chr(alpha) return ''.join(encoded) def decode(self, encoded): encoded = list(encoded) for x in", "hexlify, unhexlify from random import randrange from __main__ import __dict__ as __main__ from", "sha1('TL-Cookies').digest() def makeIV(self): iv = '' for i in range(4): iv += chr(randrange(256))", "as d_bz2 from zlib import compress as c_zlib from zlib import decompress as", "+ 2 encoded[x] = chr(alpha) return ''.join(encoded) def decode(self, encoded): encoded = list(encoded)", "compress as c_zlib from zlib import decompress as d_zlib from sha import sha", "results.append(chr(ord(c) ^ s[(s[j] + s[i]) % 256])) return ''.join(results) def encode(self, data): b64", "b64encode, b64decode from binascii import hexlify, unhexlify from random import randrange from __main__", "range(4): iv += chr(randrange(256)) return iv def rc4(self, data, key): j = 0", "len(key)])) % 256 s[i], s[j] = s[j], s[i] j = i = 0", "ord(key[i % len(key)])) % 256 s[i], s[j] = 
s[j], s[i] j = i", "from __main__ import __dict__ as __main__ from bz2 import compress as c_bz2 from", "sha import sha as sha1 class HackerCrypt: __version__ = 'v1.2.0.2' def __init__(self): self.MAGIC", "def rc4(self, data, key): j = 0 s = range(256) for i in", "encrypted[:4] data = encrypted[4:] key = self.KEY + iv data = self.rc4(data, key)", "s[i], s[j] results.append(chr(ord(c) ^ s[(s[j] + s[i]) % 256])) return ''.join(results) def encode(self,", "key) def decrypt(self, encrypted): if len(encrypted) < 4: return None iv = encrypted[:4]", "iv = encrypted[:4] data = encrypted[4:] key = self.KEY + iv data =", "in range(4): iv += chr(randrange(256)) return iv def rc4(self, data, key): j =", "% 256 s[j], s[i] = s[i], s[j] results.append(chr(ord(c) ^ s[(s[j] + s[i]) %", "self.rc4(data, key) def decrypt(self, encrypted): if len(encrypted) < 4: return None iv =", "2) encoded = unhexlify(''.join(encoded)) unhexed = unhexlify(encoded) return b64decode(unhexed) def compress(self, data): bz2", "import decompress as d_zlib from sha import sha as sha1 class HackerCrypt: __version__", "from zlib import compress as c_zlib from zlib import decompress as d_zlib from", "+ iv data = self.rc4(data, key) if not data.startswith(self.MAGIC): return None decoded =", "Filename: HackerCrypt.py # Created by: Cody/Fd Green Cat Fd (January 31st, 2013) ####", "+ encoded iv = self.makeIV() key = self.KEY + iv return iv +", "as __main__ from bz2 import compress as c_bz2 from bz2 import decompress as", "Cat Fd (January 31st, 2013) #### # Description: # # Encryption method written", "= unhexlify(''.join(encoded)) unhexed = unhexlify(encoded) return b64decode(unhexed) def compress(self, data): bz2 = b64encode(c_bz2(data))", "= (i + s[j]) % 256 s[j], s[i] = s[i], s[j] results.append(chr(ord(c) ^", "import randrange from __main__ import __dict__ as __main__ from bz2 import compress as", "compressed = self.compress(data) encoded = self.encode(compressed) data = self.MAGIC + encoded iv =", "in data: 
j = (j + 1) % 256 i = (i +", "key = self.KEY + iv return iv + self.rc4(data, key) def decrypt(self, encrypted):", "# Created by: Cody/Fd Green Cat Fd (January 31st, 2013) #### # Description:", "meet Toon Land's coding standards. #### from base64 import b64encode, b64decode from binascii", "= (j + 1) % 256 i = (i + s[j]) % 256", "# Description: # # Encryption method written by Team FD in 2011 for", "range(256) for i in range(256): j = (j + s[i] + ord(key[i %", "= str(encoded[x]) encoded[x] = str(ord(alpha) - 2) encoded = unhexlify(''.join(encoded)) unhexed = unhexlify(encoded)", "Land's coding standards. #### from base64 import b64encode, b64decode from binascii import hexlify,", "written by Team FD in 2011 for their personal releases. # The script", "random import randrange from __main__ import __dict__ as __main__ from bz2 import compress", "encoded = unhexlify(''.join(encoded)) unhexed = unhexlify(encoded) return b64decode(unhexed) def compress(self, data): bz2 =", "256 i = (i + s[j]) % 256 s[j], s[i] = s[i], s[j]", "= (j + s[i] + ord(key[i % len(key)])) % 256 s[i], s[j] =", "hex = hexlify(b64) encoded = list(hexlify(hex)) for x in range(len(encoded)): alpha = int(encoded[x])", "i = (i + s[j]) % 256 s[j], s[i] = s[i], s[j] results.append(chr(ord(c)", "alpha = int(encoded[x]) + 2 encoded[x] = chr(alpha) return ''.join(encoded) def decode(self, encoded):", "2011 for their personal releases. # The script has been modified to meet", "unhexlify from random import randrange from __main__ import __dict__ as __main__ from bz2", "The script has been modified to meet Toon Land's coding standards. #### from", "s[i] j = i = 0 results = [] for c in data:", "x in range(len(encoded)): alpha = str(encoded[x]) encoded[x] = str(ord(alpha) - 2) encoded =", "Team FD in 2011 for their personal releases. 
# The script has been", "data = self.MAGIC + encoded iv = self.makeIV() key = self.KEY + iv", "########################## # Filename: HackerCrypt.py # Created by: Cody/Fd Green Cat Fd (January 31st,", "alpha = str(encoded[x]) encoded[x] = str(ord(alpha) - 2) encoded = unhexlify(''.join(encoded)) unhexed =", "= unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed)) def encrypt(self, data): compressed = self.compress(data) encoded = self.encode(compressed)", "4: return None iv = encrypted[:4] data = encrypted[4:] key = self.KEY +", "by Team FD in 2011 for their personal releases. # The script has", "iv data = self.rc4(data, key) if not data.startswith(self.MAGIC): return None decoded = self.decode(data[len(self.MAGIC):])", "data = self.rc4(data, key) if not data.startswith(self.MAGIC): return None decoded = self.decode(data[len(self.MAGIC):]) return", "LAND PROJECT ########################## # Filename: HackerCrypt.py # Created by: Cody/Fd Green Cat Fd", "= 0 results = [] for c in data: j = (j +", "has been modified to meet Toon Land's coding standards. 
#### from base64 import", "compress(self, data): bz2 = b64encode(c_bz2(data)) return c_zlib(hexlify(bz2)) def decompress(self, compressed): unhexed = unhexlify(d_zlib(compressed))", "bz2 = b64encode(c_bz2(data)) return c_zlib(hexlify(bz2)) def decompress(self, compressed): unhexed = unhexlify(d_zlib(compressed)) return d_bz2(b64decode(unhexed))", "__main__ from bz2 import compress as c_bz2 from bz2 import decompress as d_bz2", "= range(256) for i in range(256): j = (j + s[i] + ord(key[i", "[] for c in data: j = (j + 1) % 256 i", "range(len(encoded)): alpha = int(encoded[x]) + 2 encoded[x] = chr(alpha) return ''.join(encoded) def decode(self,", "< 4: return None iv = encrypted[:4] data = encrypted[4:] key = self.KEY", "__version__ = 'v1.2.0.2' def __init__(self): self.MAGIC = sha1('[TL]').digest() self.KEY = sha1('TL-Cookies').digest() def makeIV(self):", "def encode(self, data): b64 = b64encode(data) hex = hexlify(b64) encoded = list(hexlify(hex)) for", "+ s[i]) % 256])) return ''.join(results) def encode(self, data): b64 = b64encode(data) hex", "in range(len(encoded)): alpha = str(encoded[x]) encoded[x] = str(ord(alpha) - 2) encoded = unhexlify(''.join(encoded))", "unhexlify(''.join(encoded)) unhexed = unhexlify(encoded) return b64decode(unhexed) def compress(self, data): bz2 = b64encode(c_bz2(data)) return", "script has been modified to meet Toon Land's coding standards. 
#### from base64", "encoded = list(hexlify(hex)) for x in range(len(encoded)): alpha = int(encoded[x]) + 2 encoded[x]", "as c_bz2 from bz2 import decompress as d_bz2 from zlib import compress as", "data): b64 = b64encode(data) hex = hexlify(b64) encoded = list(hexlify(hex)) for x in", "iv = '' for i in range(4): iv += chr(randrange(256)) return iv def", "% 256 i = (i + s[j]) % 256 s[j], s[i] = s[i],", "i in range(256): j = (j + s[i] + ord(key[i % len(key)])) %", "= sha1('[TL]').digest() self.KEY = sha1('TL-Cookies').digest() def makeIV(self): iv = '' for i in", "iv += chr(randrange(256)) return iv def rc4(self, data, key): j = 0 s", "been modified to meet Toon Land's coding standards. #### from base64 import b64encode,", "encoded = list(encoded) for x in range(len(encoded)): alpha = str(encoded[x]) encoded[x] = str(ord(alpha)", "method written by Team FD in 2011 for their personal releases. # The", "0 results = [] for c in data: j = (j + 1)", "chr(randrange(256)) return iv def rc4(self, data, key): j = 0 s = range(256)", "as sha1 class HackerCrypt: __version__ = 'v1.2.0.2' def __init__(self): self.MAGIC = sha1('[TL]').digest() self.KEY", "encoded): encoded = list(encoded) for x in range(len(encoded)): alpha = str(encoded[x]) encoded[x] =", "range(256): j = (j + s[i] + ord(key[i % len(key)])) % 256 s[i],", "sha1('[TL]').digest() self.KEY = sha1('TL-Cookies').digest() def makeIV(self): iv = '' for i in range(4):", "for their personal releases. 
# The script has been modified to meet Toon", "2 encoded[x] = chr(alpha) return ''.join(encoded) def decode(self, encoded): encoded = list(encoded) for", "Created by: Cody/Fd Green Cat Fd (January 31st, 2013) #### # Description: #", "key = self.KEY + iv data = self.rc4(data, key) if not data.startswith(self.MAGIC): return", "import decompress as d_bz2 from zlib import compress as c_zlib from zlib import", "zlib import compress as c_zlib from zlib import decompress as d_zlib from sha", "(j + s[i] + ord(key[i % len(key)])) % 256 s[i], s[j] = s[j]," ]
[ "a game with the slug \"{}\"'.format(slug) ) if game.status != GameStatus.ACTIVE: raise CommandError(", "models.Game.objects.get(slug=slug) except models.Game.DoesNotExist: raise CommandError( 'Could not find a game with the slug", "django.core.management.base import BaseCommand, CommandError from core import models from core.game import process_turn from", "help='Slug of the game to advance', ) parser.add_argument( '--no_input', action='store_true', help='Skip prompt.', )", "advance', ) parser.add_argument( '--no_input', action='store_true', help='Skip prompt.', ) parser.add_argument( '--dry_run', action='store_true', help='Do not", "action='store_true', help='Skip prompt.', ) parser.add_argument( '--dry_run', action='store_true', help='Do not advance turn - show", "'game', type=str, help='Slug of the game to advance', ) parser.add_argument( '--no_input', action='store_true', help='Skip", "core import models from core.game import process_turn from core.models.base import GameStatus from .", "game' ) turn = game.get_current_turn() if turn.game.status != GameStatus.ACTIVE: raise CommandError('Cannot restore turn", "GameStatus from . 
import DiplomacyManagementCommandMixin class Command(BaseCommand, DiplomacyManagementCommandMixin): @property def help(self): return 'Restore", "an inactive game' ) turn = game.get_current_turn() if turn.game.status != GameStatus.ACTIVE: raise CommandError('Cannot", "not active.') if not turn.ready_to_process: self.stdout.write('Not all nations have finalized their orders\\n') self.prompt()", "turn on an inactive game' ) turn = game.get_current_turn() if turn.game.status != GameStatus.ACTIVE:", "the game to advance', ) parser.add_argument( '--no_input', action='store_true', help='Skip prompt.', ) parser.add_argument( '--dry_run',", "if game.status != GameStatus.ACTIVE: raise CommandError( 'Cannot advance turn on an inactive game'", "turn.game.status != GameStatus.ACTIVE: raise CommandError('Cannot restore turn if game is not active.') if", "import models from core.game import process_turn from core.models.base import GameStatus from . import", "their orders\\n') self.prompt() result = process_turn(turn, dry_run) if dry_run: pretty_output = json.dumps(result, sort_keys=True,", "game is not active.') if not turn.ready_to_process: self.stdout.write('Not all nations have finalized their", "a game to a previous turn' def add_arguments(self, parser): parser.add_argument( 'game', type=str, help='Slug", "'Restore a game to a previous turn' def add_arguments(self, parser): parser.add_argument( 'game', type=str,", "= models.Game.objects.get(slug=slug) except models.Game.DoesNotExist: raise CommandError( 'Could not find a game with the", "advance turn - show outcome of adjudicator.', ) def handle(self, *args, **options): slug", "class Command(BaseCommand, DiplomacyManagementCommandMixin): @property def help(self): return 'Restore a game to a previous", "game to advance', ) parser.add_argument( '--no_input', action='store_true', help='Skip prompt.', ) parser.add_argument( '--dry_run', action='store_true',", "CommandError('Cannot restore turn if game is not active.') if not 
turn.ready_to_process: self.stdout.write('Not all", "parser.add_argument( '--no_input', action='store_true', help='Skip prompt.', ) parser.add_argument( '--dry_run', action='store_true', help='Do not advance turn", "game = models.Game.objects.get(slug=slug) except models.Game.DoesNotExist: raise CommandError( 'Could not find a game with", "action='store_true', help='Do not advance turn - show outcome of adjudicator.', ) def handle(self,", "import DiplomacyManagementCommandMixin class Command(BaseCommand, DiplomacyManagementCommandMixin): @property def help(self): return 'Restore a game to", "<filename>core/management/commands/advance_turn.py import json from django.core.management.base import BaseCommand, CommandError from core import models from", "core.game import process_turn from core.models.base import GameStatus from . import DiplomacyManagementCommandMixin class Command(BaseCommand,", "@property def help(self): return 'Restore a game to a previous turn' def add_arguments(self,", "self.noinput = options['no_input'] dry_run = options['dry_run'] try: game = models.Game.objects.get(slug=slug) except models.Game.DoesNotExist: raise", "find a game with the slug \"{}\"'.format(slug) ) if game.status != GameStatus.ACTIVE: raise", "a previous turn' def add_arguments(self, parser): parser.add_argument( 'game', type=str, help='Slug of the game", "all nations have finalized their orders\\n') self.prompt() result = process_turn(turn, dry_run) if dry_run:", "'Could not find a game with the slug \"{}\"'.format(slug) ) if game.status !=", "import json from django.core.management.base import BaseCommand, CommandError from core import models from core.game", "= options['no_input'] dry_run = options['dry_run'] try: game = models.Game.objects.get(slug=slug) except models.Game.DoesNotExist: raise CommandError(", "game.status != GameStatus.ACTIVE: raise CommandError( 'Cannot advance turn on an inactive game' )", "of the game to advance', ) parser.add_argument( '--no_input', 
action='store_true', help='Skip prompt.', ) parser.add_argument(", "raise CommandError( 'Cannot advance turn on an inactive game' ) turn = game.get_current_turn()", "turn if game is not active.') if not turn.ready_to_process: self.stdout.write('Not all nations have", "have finalized their orders\\n') self.prompt() result = process_turn(turn, dry_run) if dry_run: pretty_output =", "the slug \"{}\"'.format(slug) ) if game.status != GameStatus.ACTIVE: raise CommandError( 'Cannot advance turn", "slug \"{}\"'.format(slug) ) if game.status != GameStatus.ACTIVE: raise CommandError( 'Cannot advance turn on", "to advance', ) parser.add_argument( '--no_input', action='store_true', help='Skip prompt.', ) parser.add_argument( '--dry_run', action='store_true', help='Do", "return 'Restore a game to a previous turn' def add_arguments(self, parser): parser.add_argument( 'game',", "prompt.', ) parser.add_argument( '--dry_run', action='store_true', help='Do not advance turn - show outcome of", "game with the slug \"{}\"'.format(slug) ) if game.status != GameStatus.ACTIVE: raise CommandError( 'Cannot", "handle(self, *args, **options): slug = options['game'] self.noinput = options['no_input'] dry_run = options['dry_run'] try:", "import process_turn from core.models.base import GameStatus from . 
import DiplomacyManagementCommandMixin class Command(BaseCommand, DiplomacyManagementCommandMixin):", ") turn = game.get_current_turn() if turn.game.status != GameStatus.ACTIVE: raise CommandError('Cannot restore turn if", "Command(BaseCommand, DiplomacyManagementCommandMixin): @property def help(self): return 'Restore a game to a previous turn'", "of adjudicator.', ) def handle(self, *args, **options): slug = options['game'] self.noinput = options['no_input']", "turn = game.get_current_turn() if turn.game.status != GameStatus.ACTIVE: raise CommandError('Cannot restore turn if game", "GameStatus.ACTIVE: raise CommandError('Cannot restore turn if game is not active.') if not turn.ready_to_process:", "self.prompt() result = process_turn(turn, dry_run) if dry_run: pretty_output = json.dumps(result, sort_keys=True, indent=4) self.stdout.write(pretty_output)", "from core import models from core.game import process_turn from core.models.base import GameStatus from", "advance turn on an inactive game' ) turn = game.get_current_turn() if turn.game.status !=", "not find a game with the slug \"{}\"'.format(slug) ) if game.status != GameStatus.ACTIVE:", "type=str, help='Slug of the game to advance', ) parser.add_argument( '--no_input', action='store_true', help='Skip prompt.',", "GameStatus.ACTIVE: raise CommandError( 'Cannot advance turn on an inactive game' ) turn =", "options['dry_run'] try: game = models.Game.objects.get(slug=slug) except models.Game.DoesNotExist: raise CommandError( 'Could not find a", "options['game'] self.noinput = options['no_input'] dry_run = options['dry_run'] try: game = models.Game.objects.get(slug=slug) except models.Game.DoesNotExist:", "- show outcome of adjudicator.', ) def handle(self, *args, **options): slug = options['game']", "restore turn if game is not active.') if not turn.ready_to_process: self.stdout.write('Not all nations", "parser): parser.add_argument( 'game', type=str, help='Slug of the game to advance', ) parser.add_argument( 
'--no_input',", "turn.ready_to_process: self.stdout.write('Not all nations have finalized their orders\\n') self.prompt() result = process_turn(turn, dry_run)", "models from core.game import process_turn from core.models.base import GameStatus from . import DiplomacyManagementCommandMixin", "DiplomacyManagementCommandMixin class Command(BaseCommand, DiplomacyManagementCommandMixin): @property def help(self): return 'Restore a game to a", "= options['dry_run'] try: game = models.Game.objects.get(slug=slug) except models.Game.DoesNotExist: raise CommandError( 'Could not find", "if game is not active.') if not turn.ready_to_process: self.stdout.write('Not all nations have finalized", "add_arguments(self, parser): parser.add_argument( 'game', type=str, help='Slug of the game to advance', ) parser.add_argument(", "'--dry_run', action='store_true', help='Do not advance turn - show outcome of adjudicator.', ) def", ". import DiplomacyManagementCommandMixin class Command(BaseCommand, DiplomacyManagementCommandMixin): @property def help(self): return 'Restore a game", "CommandError from core import models from core.game import process_turn from core.models.base import GameStatus", "CommandError( 'Cannot advance turn on an inactive game' ) turn = game.get_current_turn() if", "turn - show outcome of adjudicator.', ) def handle(self, *args, **options): slug =", "= game.get_current_turn() if turn.game.status != GameStatus.ACTIVE: raise CommandError('Cannot restore turn if game is", "help(self): return 'Restore a game to a previous turn' def add_arguments(self, parser): parser.add_argument(", "game to a previous turn' def add_arguments(self, parser): parser.add_argument( 'game', type=str, help='Slug of", "'Cannot advance turn on an inactive game' ) turn = game.get_current_turn() if turn.game.status", ") parser.add_argument( '--dry_run', action='store_true', help='Do not advance turn - show outcome of adjudicator.',", "**options): slug = options['game'] self.noinput = 
options['no_input'] dry_run = options['dry_run'] try: game =", "dry_run = options['dry_run'] try: game = models.Game.objects.get(slug=slug) except models.Game.DoesNotExist: raise CommandError( 'Could not", "help='Do not advance turn - show outcome of adjudicator.', ) def handle(self, *args,", "'--no_input', action='store_true', help='Skip prompt.', ) parser.add_argument( '--dry_run', action='store_true', help='Do not advance turn -", "game.get_current_turn() if turn.game.status != GameStatus.ACTIVE: raise CommandError('Cannot restore turn if game is not", "not turn.ready_to_process: self.stdout.write('Not all nations have finalized their orders\\n') self.prompt() result = process_turn(turn,", "parser.add_argument( 'game', type=str, help='Slug of the game to advance', ) parser.add_argument( '--no_input', action='store_true',", "from core.models.base import GameStatus from . import DiplomacyManagementCommandMixin class Command(BaseCommand, DiplomacyManagementCommandMixin): @property def", ") if game.status != GameStatus.ACTIVE: raise CommandError( 'Cannot advance turn on an inactive", "= options['game'] self.noinput = options['no_input'] dry_run = options['dry_run'] try: game = models.Game.objects.get(slug=slug) except", "options['no_input'] dry_run = options['dry_run'] try: game = models.Game.objects.get(slug=slug) except models.Game.DoesNotExist: raise CommandError( 'Could", ") parser.add_argument( '--no_input', action='store_true', help='Skip prompt.', ) parser.add_argument( '--dry_run', action='store_true', help='Do not advance", "models.Game.DoesNotExist: raise CommandError( 'Could not find a game with the slug \"{}\"'.format(slug) )", "!= GameStatus.ACTIVE: raise CommandError('Cannot restore turn if game is not active.') if not", "json from django.core.management.base import BaseCommand, CommandError from core import models from core.game import", "to a previous turn' def add_arguments(self, parser): parser.add_argument( 'game', type=str, help='Slug of the", 
"slug = options['game'] self.noinput = options['no_input'] dry_run = options['dry_run'] try: game = models.Game.objects.get(slug=slug)", "with the slug \"{}\"'.format(slug) ) if game.status != GameStatus.ACTIVE: raise CommandError( 'Cannot advance", "CommandError( 'Could not find a game with the slug \"{}\"'.format(slug) ) if game.status", "!= GameStatus.ACTIVE: raise CommandError( 'Cannot advance turn on an inactive game' ) turn", ") def handle(self, *args, **options): slug = options['game'] self.noinput = options['no_input'] dry_run =", "from . import DiplomacyManagementCommandMixin class Command(BaseCommand, DiplomacyManagementCommandMixin): @property def help(self): return 'Restore a", "from django.core.management.base import BaseCommand, CommandError from core import models from core.game import process_turn", "help='Skip prompt.', ) parser.add_argument( '--dry_run', action='store_true', help='Do not advance turn - show outcome", "inactive game' ) turn = game.get_current_turn() if turn.game.status != GameStatus.ACTIVE: raise CommandError('Cannot restore", "parser.add_argument( '--dry_run', action='store_true', help='Do not advance turn - show outcome of adjudicator.', )", "import GameStatus from . import DiplomacyManagementCommandMixin class Command(BaseCommand, DiplomacyManagementCommandMixin): @property def help(self): return", "self.stdout.write('Not all nations have finalized their orders\\n') self.prompt() result = process_turn(turn, dry_run) if", "def handle(self, *args, **options): slug = options['game'] self.noinput = options['no_input'] dry_run = options['dry_run']", "outcome of adjudicator.', ) def handle(self, *args, **options): slug = options['game'] self.noinput =", "orders\\n') self.prompt() result = process_turn(turn, dry_run) if dry_run: pretty_output = json.dumps(result, sort_keys=True, indent=4)", "core.models.base import GameStatus from . 
import DiplomacyManagementCommandMixin class Command(BaseCommand, DiplomacyManagementCommandMixin): @property def help(self):", "from core.game import process_turn from core.models.base import GameStatus from . import DiplomacyManagementCommandMixin class", "finalized their orders\\n') self.prompt() result = process_turn(turn, dry_run) if dry_run: pretty_output = json.dumps(result,", "active.') if not turn.ready_to_process: self.stdout.write('Not all nations have finalized their orders\\n') self.prompt() result", "is not active.') if not turn.ready_to_process: self.stdout.write('Not all nations have finalized their orders\\n')", "not advance turn - show outcome of adjudicator.', ) def handle(self, *args, **options):", "previous turn' def add_arguments(self, parser): parser.add_argument( 'game', type=str, help='Slug of the game to", "if turn.game.status != GameStatus.ACTIVE: raise CommandError('Cannot restore turn if game is not active.')", "nations have finalized their orders\\n') self.prompt() result = process_turn(turn, dry_run) if dry_run: pretty_output", "BaseCommand, CommandError from core import models from core.game import process_turn from core.models.base import", "try: game = models.Game.objects.get(slug=slug) except models.Game.DoesNotExist: raise CommandError( 'Could not find a game", "raise CommandError('Cannot restore turn if game is not active.') if not turn.ready_to_process: self.stdout.write('Not", "except models.Game.DoesNotExist: raise CommandError( 'Could not find a game with the slug \"{}\"'.format(slug)", "show outcome of adjudicator.', ) def handle(self, *args, **options): slug = options['game'] self.noinput", "def add_arguments(self, parser): parser.add_argument( 'game', type=str, help='Slug of the game to advance', )", "*args, **options): slug = options['game'] self.noinput = options['no_input'] dry_run = options['dry_run'] try: game", "adjudicator.', ) def handle(self, *args, **options): slug = options['game'] self.noinput = 
options['no_input'] dry_run", "import BaseCommand, CommandError from core import models from core.game import process_turn from core.models.base", "\"{}\"'.format(slug) ) if game.status != GameStatus.ACTIVE: raise CommandError( 'Cannot advance turn on an", "raise CommandError( 'Could not find a game with the slug \"{}\"'.format(slug) ) if", "turn' def add_arguments(self, parser): parser.add_argument( 'game', type=str, help='Slug of the game to advance',", "on an inactive game' ) turn = game.get_current_turn() if turn.game.status != GameStatus.ACTIVE: raise", "if not turn.ready_to_process: self.stdout.write('Not all nations have finalized their orders\\n') self.prompt() result =", "process_turn from core.models.base import GameStatus from . import DiplomacyManagementCommandMixin class Command(BaseCommand, DiplomacyManagementCommandMixin): @property", "def help(self): return 'Restore a game to a previous turn' def add_arguments(self, parser):", "DiplomacyManagementCommandMixin): @property def help(self): return 'Restore a game to a previous turn' def" ]
[ "toc = time.perf_counter() print(f\"Dominant selection in {toc - tic:0.2f}s\") if timing: end =", "timing: toc = time.perf_counter() print(f\"Dominant selection in {toc - tic:0.2f}s\") if timing: end", "labels = cluster.labels_ labels = list(labels) centroid = cluster.cluster_centers_ if timing: tic =", "j = labels.count(i) j = j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter() print(f\"Percentage", "indices = np.argsort(percent)[::-1] dominant = centroid[indices[0]] if timing: end = time.perf_counter() total_time =", "= end - start print( f\"sklearn_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid,", "different clusters, create histogram, and normalise sorted_labels = np.arange(0, len(np.unique(labels)) + 1) (hist,", "start + (percent * 300) cv2.rectangle(rect, (int(start), 0), (int(end), 50), colour.astype(\"uint8\").tolist(), -1) start", "tic = time.perf_counter() cluster = KMeans(n_clusters=colours) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans", "if timing: tic = time.perf_counter() percent = [] _, counts = np.unique(labels, return_counts=True)", "of different clusters, create histogram, and normalise sorted_labels = np.arange(0, len(np.unique(labels)) + 1)", "centroid = cv2.kmeans( pixels, n_colours, None, criteria, 10, flags) labels = labels.flatten().tolist() _,", "img.reshape((-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the image in {toc -", "50), colour.astype(\"uint8\").tolist(), -1) start = end return rect def clamp(x): ''' Utility function", "def sklearn_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using sklearn, based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454", "img = img * 255 img = img.reshape((-1, 3)) if timing: toc =", "tic = time.perf_counter() cluster = KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001) cluster.fit(img) if 
timing: toc", "= list(labels) centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter() percent = []", "import io from skimage.transform import rescale def cv2_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour", "colour) in colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] \", \"{:0.2f}%\".format(percent * 100)) end = start", "for web use that speeds up the sklearn variant. Also can use a", "toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") if timing: tic =", "factor to improve the speed at cost of accuracy ''' if timing: start", "dominant colour in an image ''' dominant_colour = fast_dominant_colour(img_url, scale=0.1) r = dominant_colour[0]", "tic = time.perf_counter() dominant = centroid[np.argmax(counts)] if timing: toc = time.perf_counter() print(f\"Dominant selection", "range(len(centroid)): j = counts[i] j = j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter()", "in colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] \", \"{:0.2f}%\".format(percent * 100)) end = start +", "function to return ints from 0-255 ''' return int(max(0, min(x, 255))) def get_rgb_colour(img_url,", "-1) start = end return rect def clamp(x): ''' Utility function to return", "for i in range(len(centroid)): j = counts[i] j = j/(len(labels)) percent.append(j) if timing:", "= np.histogram(labels, bins=sorted_labels) hist = hist.astype(\"float\") hist /= hist.sum() # Create frequency rect", "cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels", "end - start print(f\"fast_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def", "labels, centroid = cv2.kmeans( pixels, n_colours, None, criteria, 10, flags) labels = labels.flatten().tolist()", "<reponame>tawilkinson/dominant-colour import numpy as np import time from cv2 import cv2 from 
sklearn.cluster", "cv2.kmeans( pixels, n_colours, None, criteria, 10, flags) labels = labels.flatten().tolist() _, counts =", "tic = time.perf_counter() percent = [] _, counts = np.unique(labels, return_counts=True) for i", "hex sting and return an rgb tuple of the dominant colour in an", "start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) if scale != 1.0:", "_, counts = np.unique(labels, return_counts=True) for i in range(len(centroid)): j = counts[i] j", "selection in {toc - tic:0.2f}s\") if timing: end = time.perf_counter() total_time = end", "import cv2 from sklearn.cluster import KMeans from skimage import io from skimage.transform import", "based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if timing: start = time.perf_counter() tic = time.perf_counter() img", "https://stackoverflow.com/a/43111221/2523885 ''' if timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url)", "in range(len(centroid)): j = labels.count(i) j = j/(len(labels)) percent.append(j) if timing: toc =", "return dominant, labels, centroid, total_time def fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0): ''' Faster method", "def get_rgb_colour(img_url, debug=False): ''' Method to print hex sting and return an rgb", "end - start print( f\"sklearn_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time", "= hist.astype(\"float\") hist /= hist.sum() # Create frequency rect and iterate through each", "= end - start print(f\"cv2_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time", "cluster's colour # and percentage rect = np.zeros((50, 300, 3), dtype=np.uint8) colours =", "rescale(img, scale, multichannel=True) img = img * 255 img = img.reshape((-1, 3)) if", "tol=0.001) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc - 
tic:0.2f}s\")", "tic = time.perf_counter() img = io.imread(img_url) pixels = np.float32(img.reshape(-1, 3)) if timing: toc", "b = dominant_colour[2] if debug: hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r), clamp(g), clamp(b)) print(f'{hex_str}') rgb_colour =", "if timing: tic = time.perf_counter() n_colours = colours criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,", "= KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation", "= time.perf_counter() percent = [] for i in range(len(centroid)): j = labels.count(i) j", "= time.perf_counter() img = io.imread(img_url) img = img.reshape((-1, 3)) if timing: toc =", "colours criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1) flags = cv2.KMEANS_RANDOM_CENTERS _, labels,", "method using open cv, based on https://stackoverflow.com/a/43111221/2523885 ''' if timing: start = time.perf_counter()", "improve the speed at cost of accuracy ''' if timing: start = time.perf_counter()", "number of different clusters, create histogram, and normalise sorted_labels = np.arange(0, len(np.unique(labels)) +", "execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def sklearn_dominant_colour(img_url, colours=10, timing=False): '''", "None, criteria, 10, flags) labels = labels.flatten().tolist() _, counts = np.unique(labels, return_counts=True) if", "on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if timing: start = time.perf_counter() tic = time.perf_counter() img =", "3)) if timing: toc = time.perf_counter() print(f\"Loaded the image in {toc - tic:0.2f}s\")", "speed at cost of accuracy ''' if timing: start = time.perf_counter() tic =", "n_init=3, max_iter=10, tol=0.001) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc", "- tic:0.2f}s\") if timing: tic = time.perf_counter() 
cluster = KMeans(n_clusters=colours) cluster.fit(img) if timing:", "end = start + (percent * 300) cv2.rectangle(rect, (int(start), 0), (int(end), 50), colour.astype(\"uint8\").tolist(),", "return dominant, labels, centroid, total_time def visualise_colours(labels, centroids): ''' Generate a visualisation of", "time.perf_counter() cluster = KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001) cluster.fit(img) if timing: toc = time.perf_counter()", "# Create frequency rect and iterate through each cluster's colour # and percentage", "if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels =", "the sklearn variant. Also can use a scaling factor to improve the speed", "each cluster's colour # and percentage rect = np.zeros((50, 300, 3), dtype=np.uint8) colours", "rgb tuple of the dominant colour in an image ''' dominant_colour = fast_dominant_colour(img_url,", "= start + (percent * 300) cv2.rectangle(rect, (int(start), 0), (int(end), 50), colour.astype(\"uint8\").tolist(), -1)", "as np import time from cv2 import cv2 from sklearn.cluster import KMeans from", "debug=False): ''' Method to print hex sting and return an rgb tuple of", "flags = cv2.KMEANS_RANDOM_CENTERS _, labels, centroid = cv2.kmeans( pixels, n_colours, None, criteria, 10,", "cluster.labels_ centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter() percent = [] _,", "time.perf_counter() print(f\"Dominant selection in {toc - tic:0.2f}s\") if timing: end = time.perf_counter() total_time", "end = time.perf_counter() total_time = end - start print(f\"cv2_dominant_colour execution in {total_time:0.2f}s\") return", "KMeans from skimage import io from skimage.transform import rescale def cv2_dominant_colour(img_url, colours=10, timing=False):", "if timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) if scale", "time.perf_counter() percent = [] for i in range(len(centroid)): j = labels.count(i) j =", "(percent * 300) 
cv2.rectangle(rect, (int(start), 0), (int(end), 50), colour.astype(\"uint8\").tolist(), -1) start = end", "img = io.imread(img_url) pixels = np.float32(img.reshape(-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded", "tic:0.2f}s\") indices = np.argsort(percent)[::-1] dominant = centroid[indices[0]] if timing: end = time.perf_counter() total_time", "+ 1) (hist, _) = np.histogram(labels, bins=sorted_labels) hist = hist.astype(\"float\") hist /= hist.sum()", "end return rect def clamp(x): ''' Utility function to return ints from 0-255", "= time.perf_counter() total_time = end - start print(f\"cv2_dominant_colour execution in {total_time:0.2f}s\") return dominant,", "tic:0.2f}s\") labels = cluster.labels_ centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter() percent", "time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) if scale != 1.0: img =", "total_time def fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0): ''' Faster method for web use that", "= time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter()", "= dominant_colour[2] if debug: hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r), clamp(g), clamp(b)) print(f'{hex_str}') rgb_colour = (clamp(r),", "visualise_colours(labels, centroids): ''' Generate a visualisation of the colours in an image '''", "if timing: toc = time.perf_counter() print(f\"Loaded the image in {toc - tic:0.2f}s\") if", "cv2 import cv2 from sklearn.cluster import KMeans from skimage import io from skimage.transform", "centroid, total_time def sklearn_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using sklearn, based", "timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) img = img.reshape((-1,", "Utility function to return ints from 0-255 ''' return int(max(0, min(x, 255))) def", "use that speeds up the sklearn variant. 
Also can use a scaling factor", "time.perf_counter() total_time = end - start print( f\"sklearn_dominant_colour execution in {total_time:0.2f}s\") return dominant,", "r = dominant_colour[0] g = dominant_colour[1] b = dominant_colour[2] if debug: hex_str =", "def fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0): ''' Faster method for web use that speeds", "scale=0.1) r = dominant_colour[0] g = dominant_colour[1] b = dominant_colour[2] if debug: hex_str", "= time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) pixels = np.float32(img.reshape(-1, 3)) if", "hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r), clamp(g), clamp(b)) print(f'{hex_str}') rgb_colour = (clamp(r), clamp(g), clamp(b)) return rgb_colour", "start = 0 for (percent, colour) in colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] \", \"{:0.2f}%\".format(percent", "the number of different clusters, create histogram, and normalise sorted_labels = np.arange(0, len(np.unique(labels))", "200, .1) flags = cv2.KMEANS_RANDOM_CENTERS _, labels, centroid = cv2.kmeans( pixels, n_colours, None,", "timing=False): ''' Dominant Colour method using sklearn, based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if timing:", "= io.imread(img_url) pixels = np.float32(img.reshape(-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the", "np.zeros((50, 300, 3), dtype=np.uint8) colours = sorted(zip(hist, centroids)) start = 0 for (percent,", "image in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() cluster = KMeans(n_clusters=colours)", "Get the number of different clusters, create histogram, and normalise sorted_labels = np.arange(0,", "= time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) img = img.reshape((-1, 3)) if", "''' if timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) if", "clamp(x): ''' Utility 
function to return ints from 0-255 ''' return int(max(0, min(x,", "in {toc - tic:0.2f}s\") if timing: end = time.perf_counter() total_time = end -", "return_counts=True) for i in range(len(centroid)): j = counts[i] j = j/(len(labels)) percent.append(j) if", "centroids): ''' Generate a visualisation of the colours in an image ''' #", "dominant_colour = fast_dominant_colour(img_url, scale=0.1) r = dominant_colour[0] g = dominant_colour[1] b = dominant_colour[2]", "time.perf_counter() total_time = end - start print(f\"cv2_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels,", "sklearn_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using sklearn, based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 '''", "timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) if scale !=", "- tic:0.2f}s\") labels = cluster.labels_ centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter()", "centroid[indices[0]] if timing: end = time.perf_counter() total_time = end - start print( f\"sklearn_dominant_colour", "= time.perf_counter() total_time = end - start print( f\"sklearn_dominant_colour execution in {total_time:0.2f}s\") return", "min(x, 255))) def get_rgb_colour(img_url, debug=False): ''' Method to print hex sting and return", "dominant_colour[2] if debug: hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r), clamp(g), clamp(b)) print(f'{hex_str}') rgb_colour = (clamp(r), clamp(g),", "cluster = KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans", "if timing: end = time.perf_counter() total_time = end - start print(f\"cv2_dominant_colour execution in", "counts = np.unique(labels, return_counts=True) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc", "centroid[indices[0]] if timing: end = time.perf_counter() 
total_time = end - start print(f\"fast_dominant_colour execution", "img = img.reshape((-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the image in", "time.perf_counter() img = io.imread(img_url) if scale != 1.0: img = rescale(img, scale, multichannel=True)", "and percentage rect = np.zeros((50, 300, 3), dtype=np.uint8) colours = sorted(zip(hist, centroids)) start", "_) = np.histogram(labels, bins=sorted_labels) hist = hist.astype(\"float\") hist /= hist.sum() # Create frequency", "= KMeans(n_clusters=colours) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc -", "= cv2.KMEANS_RANDOM_CENTERS _, labels, centroid = cv2.kmeans( pixels, n_colours, None, criteria, 10, flags)", "0 for (percent, colour) in colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] \", \"{:0.2f}%\".format(percent * 100))", "time.perf_counter() cluster = KMeans(n_clusters=colours) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation in", "= rescale(img, scale, multichannel=True) img = img * 255 img = img.reshape((-1, 3))", "i in range(len(centroid)): j = labels.count(i) j = j/(len(labels)) percent.append(j) if timing: toc", "300) cv2.rectangle(rect, (int(start), 0), (int(end), 50), colour.astype(\"uint8\").tolist(), -1) start = end return rect", "counts = np.unique(labels, return_counts=True) for i in range(len(centroid)): j = counts[i] j =", "centroid, total_time def fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0): ''' Faster method for web use", "if scale != 1.0: img = rescale(img, scale, multichannel=True) img = img *", "timing: tic = time.perf_counter() percent = [] for i in range(len(centroid)): j =", "max_iter=10, tol=0.001) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc -", "centroids)) start = 0 for (percent, colour) in colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] \",", "from 0-255 ''' 
return int(max(0, min(x, 255))) def get_rgb_colour(img_url, debug=False): ''' Method to", "time.perf_counter() print(f\"Percentage calculation in {toc - tic:0.2f}s\") indices = np.argsort(percent)[::-1] dominant = centroid[indices[0]]", "np.histogram(labels, bins=sorted_labels) hist = hist.astype(\"float\") hist /= hist.sum() # Create frequency rect and", "return an rgb tuple of the dominant colour in an image ''' dominant_colour", "{toc - tic:0.2f}s\") if timing: end = time.perf_counter() total_time = end - start", "j = counts[i] j = j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter() print(f\"Percentage", "def clamp(x): ''' Utility function to return ints from 0-255 ''' return int(max(0,", "- tic:0.2f}s\") if timing: end = time.perf_counter() total_time = end - start print(f\"cv2_dominant_colour", "img = io.imread(img_url) if scale != 1.0: img = rescale(img, scale, multichannel=True) img", "\"{:0.2f}%\".format(percent * 100)) end = start + (percent * 300) cv2.rectangle(rect, (int(start), 0),", "f\"sklearn_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def fast_dominant_colour(img_url, colours=10, timing=False,", "debug: hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r), clamp(g), clamp(b)) print(f'{hex_str}') rgb_colour = (clamp(r), clamp(g), clamp(b)) return", "if timing: end = time.perf_counter() total_time = end - start print( f\"sklearn_dominant_colour execution", "= time.perf_counter() n_colours = colours criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1) flags", "{total_time:0.2f}s\") return dominant, labels, centroid, total_time def fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0): ''' Faster", "= dominant_colour[0] g = dominant_colour[1] b = dominant_colour[2] if debug: hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r),", "centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter() percent = [] for i", "if timing: tic = 
time.perf_counter() cluster = KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001) cluster.fit(img) if", "create histogram, and normalise sorted_labels = np.arange(0, len(np.unique(labels)) + 1) (hist, _) =", "iterate through each cluster's colour # and percentage rect = np.zeros((50, 300, 3),", "= end - start print(f\"fast_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time", "the dominant colour in an image ''' dominant_colour = fast_dominant_colour(img_url, scale=0.1) r =", "and return an rgb tuple of the dominant colour in an image '''", "colours=10, timing=False): ''' Dominant Colour method using sklearn, based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if", "= time.perf_counter() percent = [] _, counts = np.unique(labels, return_counts=True) for i in", "calculation in {toc - tic:0.2f}s\") labels = cluster.labels_ labels = list(labels) centroid =", "tic = time.perf_counter() n_colours = colours criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1)", "percent = [] for i in range(len(centroid)): j = labels.count(i) j = j/(len(labels))", "print(f\"cv2_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def sklearn_dominant_colour(img_url, colours=10, timing=False):", "tic:0.2f}s\") if timing: tic = time.perf_counter() cluster = KMeans(n_clusters=colours) cluster.fit(img) if timing: toc", "{toc - tic:0.2f}s\") if timing: tic = time.perf_counter() cluster = KMeans(n_clusters=colours, n_init=3, max_iter=10,", "Dominant Colour method using open cv, based on https://stackoverflow.com/a/43111221/2523885 ''' if timing: start", "g = dominant_colour[1] b = dominant_colour[2] if debug: hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r), clamp(g), clamp(b))", "{clamp(colour[0])}, {clamp(colour[0])}] \", \"{:0.2f}%\".format(percent * 100)) end = start + (percent * 300)", "to 
return ints from 0-255 ''' return int(max(0, min(x, 255))) def get_rgb_colour(img_url, debug=False):", "= np.zeros((50, 300, 3), dtype=np.uint8) colours = sorted(zip(hist, centroids)) start = 0 for", "''' if timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) pixels", "rect def clamp(x): ''' Utility function to return ints from 0-255 ''' return", "colour in an image ''' dominant_colour = fast_dominant_colour(img_url, scale=0.1) r = dominant_colour[0] g", "cv2_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using open cv, based on https://stackoverflow.com/a/43111221/2523885", "= centroid[indices[0]] if timing: end = time.perf_counter() total_time = end - start print(", "end = time.perf_counter() total_time = end - start print( f\"sklearn_dominant_colour execution in {total_time:0.2f}s\")", "calculation in {toc - tic:0.2f}s\") indices = np.argsort(percent)[::-1] dominant = centroid[indices[0]] if timing:", "total_time = end - start print( f\"sklearn_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels,", "an image ''' dominant_colour = fast_dominant_colour(img_url, scale=0.1) r = dominant_colour[0] g = dominant_colour[1]", "execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0):", "np.argsort(percent)[::-1] dominant = centroid[indices[0]] if timing: end = time.perf_counter() total_time = end -", "calculation in {toc - tic:0.2f}s\") labels = cluster.labels_ centroid = cluster.cluster_centers_ if timing:", "in {toc - tic:0.2f}s\") indices = np.argsort(percent)[::-1] dominant = centroid[indices[0]] if timing: end", "labels = list(labels) centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter() percent =", "if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") if timing:", "of accuracy ''' if timing: start = time.perf_counter() tic = 
time.perf_counter() img =", "labels, centroid, total_time def sklearn_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using sklearn,", "time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels = cluster.labels_ labels = list(labels)", "in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def sklearn_dominant_colour(img_url, colours=10, timing=False): ''' Dominant", "1.0: img = rescale(img, scale, multichannel=True) img = img * 255 img =", "import rescale def cv2_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using open cv,", "len(np.unique(labels)) + 1) (hist, _) = np.histogram(labels, bins=sorted_labels) hist = hist.astype(\"float\") hist /=", "= time.perf_counter() cluster = KMeans(n_clusters=colours) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation", "io.imread(img_url) pixels = np.float32(img.reshape(-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the image", "Create frequency rect and iterate through each cluster's colour # and percentage rect", "= counts[i] j = j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter() print(f\"Percentage calculation", "= colours criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1) flags = cv2.KMEANS_RANDOM_CENTERS _,", "scale=1.0): ''' Faster method for web use that speeds up the sklearn variant.", "= time.perf_counter() img = io.imread(img_url) if scale != 1.0: img = rescale(img, scale,", "= (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1) flags = cv2.KMEANS_RANDOM_CENTERS _, labels, centroid =", "sorted(zip(hist, centroids)) start = 0 for (percent, colour) in colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}]", "img * 255 img = img.reshape((-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded", "= cluster.labels_ labels = list(labels) centroid = cluster.cluster_centers_ if timing: tic = 
time.perf_counter()", "[] for i in range(len(centroid)): j = labels.count(i) j = j/(len(labels)) percent.append(j) if", "int(max(0, min(x, 255))) def get_rgb_colour(img_url, debug=False): ''' Method to print hex sting and", "KMeans(n_clusters=colours) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\")", "clusters, create histogram, and normalise sorted_labels = np.arange(0, len(np.unique(labels)) + 1) (hist, _)", "cluster = KMeans(n_clusters=colours) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc", "(hist, _) = np.histogram(labels, bins=sorted_labels) hist = hist.astype(\"float\") hist /= hist.sum() # Create", "{toc - tic:0.2f}s\") labels = cluster.labels_ centroid = cluster.cluster_centers_ if timing: tic =", "= time.perf_counter() img = io.imread(img_url) pixels = np.float32(img.reshape(-1, 3)) if timing: toc =", "- tic:0.2f}s\") labels = cluster.labels_ labels = list(labels) centroid = cluster.cluster_centers_ if timing:", "percent = [] _, counts = np.unique(labels, return_counts=True) for i in range(len(centroid)): j", "= labels.count(i) j = j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter() print(f\"Percentage calculation", "toc = time.perf_counter() print(f\"Loaded the image in {toc - tic:0.2f}s\") if timing: tic", "- tic:0.2f}s\") if timing: tic = time.perf_counter() n_colours = colours criteria = (cv2.TERM_CRITERIA_EPS", "tic:0.2f}s\") if timing: tic = time.perf_counter() dominant = centroid[np.argmax(counts)] if timing: toc =", "total_time = end - start print(f\"fast_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid,", "centroid, total_time def visualise_colours(labels, centroids): ''' Generate a visualisation of the colours in", "hist.sum() # Create frequency rect and iterate through each cluster's colour # and", "return ints from 0-255 ''' return int(max(0, min(x, 255))) def get_rgb_colour(img_url, 
debug=False): '''", "labels.flatten().tolist() _, counts = np.unique(labels, return_counts=True) if timing: toc = time.perf_counter() print(f\"KMeans calculation", "cluster.cluster_centers_ if timing: tic = time.perf_counter() percent = [] for i in range(len(centroid)):", "counts[i] j = j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter() print(f\"Percentage calculation in", "based on https://stackoverflow.com/a/43111221/2523885 ''' if timing: start = time.perf_counter() tic = time.perf_counter() img", "time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels = cluster.labels_ centroid = cluster.cluster_centers_", "from skimage import io from skimage.transform import rescale def cv2_dominant_colour(img_url, colours=10, timing=False): '''", "cluster.cluster_centers_ if timing: tic = time.perf_counter() percent = [] _, counts = np.unique(labels,", "hist = hist.astype(\"float\") hist /= hist.sum() # Create frequency rect and iterate through", "= [] _, counts = np.unique(labels, return_counts=True) for i in range(len(centroid)): j =", "toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels = cluster.labels_ centroid", "start print(f\"fast_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def visualise_colours(labels, centroids):", "300, 3), dtype=np.uint8) colours = sorted(zip(hist, centroids)) start = 0 for (percent, colour)", "sorted_labels = np.arange(0, len(np.unique(labels)) + 1) (hist, _) = np.histogram(labels, bins=sorted_labels) hist =", "Method to print hex sting and return an rgb tuple of the dominant", "dominant_colour[0] g = dominant_colour[1] b = dominant_colour[2] if debug: hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r), clamp(g),", "the speed at cost of accuracy ''' if timing: start = time.perf_counter() tic", "centroid[np.argmax(counts)] if timing: toc = time.perf_counter() print(f\"Dominant selection in {toc - tic:0.2f}s\") if", 
"visualisation of the colours in an image ''' # Get the number of", "* 255 img = img.reshape((-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the", "can use a scaling factor to improve the speed at cost of accuracy", "method using sklearn, based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if timing: start = time.perf_counter() tic", "if timing: tic = time.perf_counter() percent = [] for i in range(len(centroid)): j", "the image in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() n_colours =", "np import time from cv2 import cv2 from sklearn.cluster import KMeans from skimage", "= time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels = cluster.labels_ labels =", "time.perf_counter() total_time = end - start print(f\"fast_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels,", "= dominant_colour[1] b = dominant_colour[2] if debug: hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r), clamp(g), clamp(b)) print(f'{hex_str}')", "colours=10, timing=False): ''' Dominant Colour method using open cv, based on https://stackoverflow.com/a/43111221/2523885 '''", "= time.perf_counter() dominant = centroid[np.argmax(counts)] if timing: toc = time.perf_counter() print(f\"Dominant selection in", "timing: toc = time.perf_counter() print(f\"Percentage calculation in {toc - tic:0.2f}s\") indices = np.argsort(percent)[::-1]", "labels = labels.flatten().tolist() _, counts = np.unique(labels, return_counts=True) if timing: toc = time.perf_counter()", "labels = cluster.labels_ centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter() percent =", "percentage rect = np.zeros((50, 300, 3), dtype=np.uint8) colours = sorted(zip(hist, centroids)) start =", "colours=10, timing=False, scale=1.0): ''' Faster method for web use that speeds up the", "an rgb tuple of the dominant colour in an image ''' dominant_colour =", "if timing: tic 
= time.perf_counter() cluster = KMeans(n_clusters=colours) cluster.fit(img) if timing: toc =", "tic:0.2f}s\") if timing: tic = time.perf_counter() cluster = KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001) cluster.fit(img)", "at cost of accuracy ''' if timing: start = time.perf_counter() tic = time.perf_counter()", "image ''' # Get the number of different clusters, create histogram, and normalise", "+ (percent * 300) cv2.rectangle(rect, (int(start), 0), (int(end), 50), colour.astype(\"uint8\").tolist(), -1) start =", "print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels = cluster.labels_ centroid = cluster.cluster_centers_ if", "cv2.rectangle(rect, (int(start), 0), (int(end), 50), colour.astype(\"uint8\").tolist(), -1) start = end return rect def", "ints from 0-255 ''' return int(max(0, min(x, 255))) def get_rgb_colour(img_url, debug=False): ''' Method", "img = rescale(img, scale, multichannel=True) img = img * 255 img = img.reshape((-1,", "''' Faster method for web use that speeds up the sklearn variant. 
Also", "pixels = np.float32(img.reshape(-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the image in", "np.arange(0, len(np.unique(labels)) + 1) (hist, _) = np.histogram(labels, bins=sorted_labels) hist = hist.astype(\"float\") hist", "image ''' dominant_colour = fast_dominant_colour(img_url, scale=0.1) r = dominant_colour[0] g = dominant_colour[1] b", "timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels = cluster.labels_", "KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001) cluster.fit(img) if timing: toc = time.perf_counter() print(f\"KMeans calculation in", "the colours in an image ''' # Get the number of different clusters,", "timing: end = time.perf_counter() total_time = end - start print(f\"fast_dominant_colour execution in {total_time:0.2f}s\")", "total_time def sklearn_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using sklearn, based on:", "sklearn variant. Also can use a scaling factor to improve the speed at", "255 img = img.reshape((-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the image", "= np.arange(0, len(np.unique(labels)) + 1) (hist, _) = np.histogram(labels, bins=sorted_labels) hist = hist.astype(\"float\")", "for i in range(len(centroid)): j = labels.count(i) j = j/(len(labels)) percent.append(j) if timing:", "= end return rect def clamp(x): ''' Utility function to return ints from", "timing=False): ''' Dominant Colour method using open cv, based on https://stackoverflow.com/a/43111221/2523885 ''' if", "start print( f\"sklearn_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def fast_dominant_colour(img_url,", "flags) labels = labels.flatten().tolist() _, counts = np.unique(labels, return_counts=True) if timing: toc =", "the image in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() cluster =", "= labels.flatten().tolist() _, counts = np.unique(labels, return_counts=True) if 
timing: toc = time.perf_counter() print(f\"KMeans", "hist /= hist.sum() # Create frequency rect and iterate through each cluster's colour", "- start print( f\"sklearn_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def", "# Get the number of different clusters, create histogram, and normalise sorted_labels =", "accuracy ''' if timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url)", "execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def visualise_colours(labels, centroids): ''' Generate", "j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter() print(f\"Percentage calculation in {toc - tic:0.2f}s\")", "''' Method to print hex sting and return an rgb tuple of the", "tic:0.2f}s\") labels = cluster.labels_ labels = list(labels) centroid = cluster.cluster_centers_ if timing: tic", "speeds up the sklearn variant. Also can use a scaling factor to improve", "timing: end = time.perf_counter() total_time = end - start print(f\"cv2_dominant_colour execution in {total_time:0.2f}s\")", "= io.imread(img_url) img = img.reshape((-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the", "Faster method for web use that speeds up the sklearn variant. 
Also can", "= time.perf_counter() print(f\"Dominant selection in {toc - tic:0.2f}s\") if timing: end = time.perf_counter()", "if timing: toc = time.perf_counter() print(f\"Percentage calculation in {toc - tic:0.2f}s\") indices =", "if timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) pixels =", "time.perf_counter() percent = [] _, counts = np.unique(labels, return_counts=True) for i in range(len(centroid)):", "start print(f\"cv2_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def sklearn_dominant_colour(img_url, colours=10,", "(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1) flags = cv2.KMEANS_RANDOM_CENTERS _, labels, centroid = cv2.kmeans(", "return dominant, labels, centroid, total_time def sklearn_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method", "j = j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter() print(f\"Percentage calculation in {toc", "= time.perf_counter() total_time = end - start print(f\"fast_dominant_colour execution in {total_time:0.2f}s\") return dominant,", "n_colours, None, criteria, 10, flags) labels = labels.flatten().tolist() _, counts = np.unique(labels, return_counts=True)", "= cluster.cluster_centers_ if timing: tic = time.perf_counter() percent = [] for i in", "range(len(centroid)): j = labels.count(i) j = j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter()", "def visualise_colours(labels, centroids): ''' Generate a visualisation of the colours in an image", "skimage.transform import rescale def cv2_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using open", "a visualisation of the colours in an image ''' # Get the number", "time.perf_counter() dominant = centroid[np.argmax(counts)] if timing: toc = time.perf_counter() print(f\"Dominant selection in {toc", "in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() cluster = 
KMeans(n_clusters=colours, n_init=3,", "0-255 ''' return int(max(0, min(x, 255))) def get_rgb_colour(img_url, debug=False): ''' Method to print", "if timing: end = time.perf_counter() total_time = end - start print(f\"fast_dominant_colour execution in", "bins=sorted_labels) hist = hist.astype(\"float\") hist /= hist.sum() # Create frequency rect and iterate", "of the dominant colour in an image ''' dominant_colour = fast_dominant_colour(img_url, scale=0.1) r", "method for web use that speeds up the sklearn variant. Also can use", "using open cv, based on https://stackoverflow.com/a/43111221/2523885 ''' if timing: start = time.perf_counter() tic", "Colour method using sklearn, based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if timing: start = time.perf_counter()", "sting and return an rgb tuple of the dominant colour in an image", "in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() n_colours = colours criteria", "print(f\"Dominant selection in {toc - tic:0.2f}s\") if timing: end = time.perf_counter() total_time =", "''' # Get the number of different clusters, create histogram, and normalise sorted_labels", "= cluster.labels_ centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter() percent = []", "that speeds up the sklearn variant. 
Also can use a scaling factor to", "an image ''' # Get the number of different clusters, create histogram, and", "tic:0.2f}s\") if timing: end = time.perf_counter() total_time = end - start print(f\"cv2_dominant_colour execution", "from sklearn.cluster import KMeans from skimage import io from skimage.transform import rescale def", "in an image ''' dominant_colour = fast_dominant_colour(img_url, scale=0.1) r = dominant_colour[0] g =", "n_colours = colours criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1) flags = cv2.KMEANS_RANDOM_CENTERS", "and normalise sorted_labels = np.arange(0, len(np.unique(labels)) + 1) (hist, _) = np.histogram(labels, bins=sorted_labels)", "timing: toc = time.perf_counter() print(f\"Loaded the image in {toc - tic:0.2f}s\") if timing:", "_, labels, centroid = cv2.kmeans( pixels, n_colours, None, criteria, 10, flags) labels =", "{toc - tic:0.2f}s\") if timing: tic = time.perf_counter() dominant = centroid[np.argmax(counts)] if timing:", "toc = time.perf_counter() print(f\"Percentage calculation in {toc - tic:0.2f}s\") indices = np.argsort(percent)[::-1] dominant", "colour # and percentage rect = np.zeros((50, 300, 3), dtype=np.uint8) colours = sorted(zip(hist,", "= sorted(zip(hist, centroids)) start = 0 for (percent, colour) in colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])},", "- tic:0.2f}s\") if timing: tic = time.perf_counter() dominant = centroid[np.argmax(counts)] if timing: toc", "dominant = centroid[indices[0]] if timing: end = time.perf_counter() total_time = end - start", "/= hist.sum() # Create frequency rect and iterate through each cluster's colour #", "end = time.perf_counter() total_time = end - start print(f\"fast_dominant_colour execution in {total_time:0.2f}s\") return", "in {toc - tic:0.2f}s\") labels = cluster.labels_ labels = list(labels) centroid = cluster.cluster_centers_", "{total_time:0.2f}s\") return dominant, labels, centroid, total_time def visualise_colours(labels, centroids): ''' 
Generate a visualisation", "img = io.imread(img_url) img = img.reshape((-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded", "= time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) if scale != 1.0: img", "in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() cluster = KMeans(n_clusters=colours) cluster.fit(img)", "scale != 1.0: img = rescale(img, scale, multichannel=True) img = img * 255", "in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def visualise_colours(labels, centroids): ''' Generate a", "in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0): '''", "through each cluster's colour # and percentage rect = np.zeros((50, 300, 3), dtype=np.uint8)", "print hex sting and return an rgb tuple of the dominant colour in", "= cv2.kmeans( pixels, n_colours, None, criteria, 10, flags) labels = labels.flatten().tolist() _, counts", "0), (int(end), 50), colour.astype(\"uint8\").tolist(), -1) start = end return rect def clamp(x): '''", "percent.append(j) if timing: toc = time.perf_counter() print(f\"Percentage calculation in {toc - tic:0.2f}s\") indices", "end - start print(f\"cv2_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def", "= j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter() print(f\"Percentage calculation in {toc -", "if debug: hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r), clamp(g), clamp(b)) print(f'{hex_str}') rgb_colour = (clamp(r), clamp(g), clamp(b))", "''' Dominant Colour method using open cv, based on https://stackoverflow.com/a/43111221/2523885 ''' if timing:", "variant. 
Also can use a scaling factor to improve the speed at cost", "io.imread(img_url) if scale != 1.0: img = rescale(img, scale, multichannel=True) img = img", "print(f\"Loaded the image in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() n_colours", "= time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels = cluster.labels_ centroid =", "(int(end), 50), colour.astype(\"uint8\").tolist(), -1) start = end return rect def clamp(x): ''' Utility", "time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) img = img.reshape((-1, 3)) if timing:", "rect = np.zeros((50, 300, 3), dtype=np.uint8) colours = sorted(zip(hist, centroids)) start = 0", "frequency rect and iterate through each cluster's colour # and percentage rect =", "[] _, counts = np.unique(labels, return_counts=True) for i in range(len(centroid)): j = counts[i]", "io.imread(img_url) img = img.reshape((-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the image", "labels.count(i) j = j/(len(labels)) percent.append(j) if timing: toc = time.perf_counter() print(f\"Percentage calculation in", "import KMeans from skimage import io from skimage.transform import rescale def cv2_dominant_colour(img_url, colours=10,", "start = end return rect def clamp(x): ''' Utility function to return ints", "(percent, colour) in colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] \", \"{:0.2f}%\".format(percent * 100)) end =", "= time.perf_counter() print(f\"Percentage calculation in {toc - tic:0.2f}s\") indices = np.argsort(percent)[::-1] dominant =", "return_counts=True) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") if", "sklearn, based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if timing: start = time.perf_counter() tic = time.perf_counter()", "np.float32(img.reshape(-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the 
image in {toc -", "in an image ''' # Get the number of different clusters, create histogram,", "colours = sorted(zip(hist, centroids)) start = 0 for (percent, colour) in colours: print(f\"[{clamp(colour[0])},", "https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url)", "* 100)) end = start + (percent * 300) cv2.rectangle(rect, (int(start), 0), (int(end),", "if timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) img =", "up the sklearn variant. Also can use a scaling factor to improve the", "import numpy as np import time from cv2 import cv2 from sklearn.cluster import", "labels, centroid, total_time def fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0): ''' Faster method for web", "print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels = cluster.labels_ labels = list(labels) centroid", "(int(start), 0), (int(end), 50), colour.astype(\"uint8\").tolist(), -1) start = end return rect def clamp(x):", "print( f\"sklearn_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def fast_dominant_colour(img_url, colours=10,", "cv2.TERM_CRITERIA_MAX_ITER, 200, .1) flags = cv2.KMEANS_RANDOM_CENTERS _, labels, centroid = cv2.kmeans( pixels, n_colours,", "cv2.KMEANS_RANDOM_CENTERS _, labels, centroid = cv2.kmeans( pixels, n_colours, None, criteria, 10, flags) labels", "= img.reshape((-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the image in {toc", "labels, centroid, total_time def visualise_colours(labels, centroids): ''' Generate a visualisation of the colours", "= np.unique(labels, return_counts=True) for i in range(len(centroid)): j = counts[i] j = j/(len(labels))", "return rect def clamp(x): ''' Utility function to return ints from 0-255 '''", "in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() 
dominant = centroid[np.argmax(counts)] if", "{toc - tic:0.2f}s\") labels = cluster.labels_ labels = list(labels) centroid = cluster.cluster_centers_ if", "''' return int(max(0, min(x, 255))) def get_rgb_colour(img_url, debug=False): ''' Method to print hex", "time.perf_counter() print(f\"Loaded the image in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter()", "open cv, based on https://stackoverflow.com/a/43111221/2523885 ''' if timing: start = time.perf_counter() tic =", "fast_dominant_colour(img_url, scale=0.1) r = dominant_colour[0] g = dominant_colour[1] b = dominant_colour[2] if debug:", "rect and iterate through each cluster's colour # and percentage rect = np.zeros((50,", "255))) def get_rgb_colour(img_url, debug=False): ''' Method to print hex sting and return an", "{total_time:0.2f}s\") return dominant, labels, centroid, total_time def sklearn_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour", "{toc - tic:0.2f}s\") indices = np.argsort(percent)[::-1] dominant = centroid[indices[0]] if timing: end =", "get_rgb_colour(img_url, debug=False): ''' Method to print hex sting and return an rgb tuple", "use a scaling factor to improve the speed at cost of accuracy '''", "dominant, labels, centroid, total_time def visualise_colours(labels, centroids): ''' Generate a visualisation of the", "= centroid[indices[0]] if timing: end = time.perf_counter() total_time = end - start print(f\"fast_dominant_colour", "= time.perf_counter() print(f\"Loaded the image in {toc - tic:0.2f}s\") if timing: tic =", "# and percentage rect = np.zeros((50, 300, 3), dtype=np.uint8) colours = sorted(zip(hist, centroids))", "10, flags) labels = labels.flatten().tolist() _, counts = np.unique(labels, return_counts=True) if timing: toc", "image in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() cluster = KMeans(n_clusters=colours,", "tic = time.perf_counter() percent = [] for i in range(len(centroid)): j = labels.count(i)", "to improve the speed at cost 
of accuracy ''' if timing: start =", "fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0): ''' Faster method for web use that speeds up", "colour.astype(\"uint8\").tolist(), -1) start = end return rect def clamp(x): ''' Utility function to", "= fast_dominant_colour(img_url, scale=0.1) r = dominant_colour[0] g = dominant_colour[1] b = dominant_colour[2] if", "cv, based on https://stackoverflow.com/a/43111221/2523885 ''' if timing: start = time.perf_counter() tic = time.perf_counter()", "and iterate through each cluster's colour # and percentage rect = np.zeros((50, 300,", "cluster.labels_ labels = list(labels) centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter() percent", "Dominant Colour method using sklearn, based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if timing: start =", "timing: end = time.perf_counter() total_time = end - start print( f\"sklearn_dominant_colour execution in", "= np.argsort(percent)[::-1] dominant = centroid[indices[0]] if timing: end = time.perf_counter() total_time = end", "3), dtype=np.uint8) colours = sorted(zip(hist, centroids)) start = 0 for (percent, colour) in", "''' dominant_colour = fast_dominant_colour(img_url, scale=0.1) r = dominant_colour[0] g = dominant_colour[1] b =", "multichannel=True) img = img * 255 img = img.reshape((-1, 3)) if timing: toc", "from cv2 import cv2 from sklearn.cluster import KMeans from skimage import io from", "- tic:0.2f}s\") indices = np.argsort(percent)[::-1] dominant = centroid[indices[0]] if timing: end = time.perf_counter()", "''' Generate a visualisation of the colours in an image ''' # Get", "for (percent, colour) in colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] \", \"{:0.2f}%\".format(percent * 100)) end", "\", \"{:0.2f}%\".format(percent * 100)) end = start + (percent * 300) cv2.rectangle(rect, (int(start),", "= img * 255 img = img.reshape((-1, 3)) 
if timing: toc = time.perf_counter()", "= np.unique(labels, return_counts=True) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc -", "Generate a visualisation of the colours in an image ''' # Get the", "def cv2_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using open cv, based on", "scaling factor to improve the speed at cost of accuracy ''' if timing:", "scale, multichannel=True) img = img * 255 img = img.reshape((-1, 3)) if timing:", "i in range(len(centroid)): j = counts[i] j = j/(len(labels)) percent.append(j) if timing: toc", "histogram, and normalise sorted_labels = np.arange(0, len(np.unique(labels)) + 1) (hist, _) = np.histogram(labels,", "!= 1.0: img = rescale(img, scale, multichannel=True) img = img * 255 img", "on https://stackoverflow.com/a/43111221/2523885 ''' if timing: start = time.perf_counter() tic = time.perf_counter() img =", "criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1) flags = cv2.KMEANS_RANDOM_CENTERS _, labels, centroid", "- start print(f\"cv2_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def sklearn_dominant_colour(img_url,", "print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] \", \"{:0.2f}%\".format(percent * 100)) end = start + (percent *", "timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") if timing: tic", "{toc - tic:0.2f}s\") if timing: tic = time.perf_counter() cluster = KMeans(n_clusters=colours) cluster.fit(img) if", "time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() dominant", "timing: tic = time.perf_counter() percent = [] _, counts = np.unique(labels, return_counts=True) for", "print(f\"Loaded the image in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() cluster", "web use that speeds up the sklearn variant. 
Also can use a scaling", "100)) end = start + (percent * 300) cv2.rectangle(rect, (int(start), 0), (int(end), 50),", "+ cv2.TERM_CRITERIA_MAX_ITER, 200, .1) flags = cv2.KMEANS_RANDOM_CENTERS _, labels, centroid = cv2.kmeans( pixels,", "np.unique(labels, return_counts=True) if timing: toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\")", "timing: tic = time.perf_counter() cluster = KMeans(n_clusters=colours) cluster.fit(img) if timing: toc = time.perf_counter()", "skimage import io from skimage.transform import rescale def cv2_dominant_colour(img_url, colours=10, timing=False): ''' Dominant", "cost of accuracy ''' if timing: start = time.perf_counter() tic = time.perf_counter() img", "numpy as np import time from cv2 import cv2 from sklearn.cluster import KMeans", "''' Dominant Colour method using sklearn, based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if timing: start", "colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] \", \"{:0.2f}%\".format(percent * 100)) end = start + (percent", "tic:0.2f}s\") if timing: tic = time.perf_counter() n_colours = colours criteria = (cv2.TERM_CRITERIA_EPS +", "time.perf_counter() img = io.imread(img_url) pixels = np.float32(img.reshape(-1, 3)) if timing: toc = time.perf_counter()", "criteria, 10, flags) labels = labels.flatten().tolist() _, counts = np.unique(labels, return_counts=True) if timing:", "dominant, labels, centroid, total_time def sklearn_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using", "total_time def visualise_colours(labels, centroids): ''' Generate a visualisation of the colours in an", "from skimage.transform import rescale def cv2_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using", "in {toc - tic:0.2f}s\") labels = cluster.labels_ centroid = cluster.cluster_centers_ if timing: tic", "= 
cluster.cluster_centers_ if timing: tic = time.perf_counter() percent = [] _, counts =", "toc = time.perf_counter() print(f\"KMeans calculation in {toc - tic:0.2f}s\") labels = cluster.labels_ labels", "''' if timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) img", "using sklearn, based on: https://medium.com/analytics-vidhya/colour-separation-in-an-image-using-kmeans-clustering-using-python-f994fa398454 ''' if timing: start = time.perf_counter() tic =", "time.perf_counter() img = io.imread(img_url) img = img.reshape((-1, 3)) if timing: toc = time.perf_counter()", "Colour method using open cv, based on https://stackoverflow.com/a/43111221/2523885 ''' if timing: start =", "of the colours in an image ''' # Get the number of different", "time from cv2 import cv2 from sklearn.cluster import KMeans from skimage import io", "= np.float32(img.reshape(-1, 3)) if timing: toc = time.perf_counter() print(f\"Loaded the image in {toc", "to print hex sting and return an rgb tuple of the dominant colour", "= centroid[np.argmax(counts)] if timing: toc = time.perf_counter() print(f\"Dominant selection in {toc - tic:0.2f}s\")", "- tic:0.2f}s\") if timing: tic = time.perf_counter() cluster = KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001)", "tuple of the dominant colour in an image ''' dominant_colour = fast_dominant_colour(img_url, scale=0.1)", "image in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() n_colours = colours", "print(f\"Percentage calculation in {toc - tic:0.2f}s\") indices = np.argsort(percent)[::-1] dominant = centroid[indices[0]] if", "= time.perf_counter() cluster = KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001) cluster.fit(img) if timing: toc =", "dominant, labels, centroid, total_time def fast_dominant_colour(img_url, colours=10, timing=False, scale=1.0): ''' Faster method for", "time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) pixels = np.float32(img.reshape(-1, 3)) if 
timing:", "= [] for i in range(len(centroid)): j = labels.count(i) j = j/(len(labels)) percent.append(j)", "in range(len(centroid)): j = counts[i] j = j/(len(labels)) percent.append(j) if timing: toc =", "import time from cv2 import cv2 from sklearn.cluster import KMeans from skimage import", "if timing: tic = time.perf_counter() dominant = centroid[np.argmax(counts)] if timing: toc = time.perf_counter()", "''' Utility function to return ints from 0-255 ''' return int(max(0, min(x, 255)))", "start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) pixels = np.float32(img.reshape(-1, 3))", "start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) img = img.reshape((-1, 3))", "np.unique(labels, return_counts=True) for i in range(len(centroid)): j = counts[i] j = j/(len(labels)) percent.append(j)", "tic = time.perf_counter() img = io.imread(img_url) img = img.reshape((-1, 3)) if timing: toc", "rescale def cv2_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method using open cv, based", "Also can use a scaling factor to improve the speed at cost of", "a scaling factor to improve the speed at cost of accuracy ''' if", "pixels, n_colours, None, criteria, 10, flags) labels = labels.flatten().tolist() _, counts = np.unique(labels,", "io from skimage.transform import rescale def cv2_dominant_colour(img_url, colours=10, timing=False): ''' Dominant Colour method", "{toc - tic:0.2f}s\") if timing: tic = time.perf_counter() n_colours = colours criteria =", "timing=False, scale=1.0): ''' Faster method for web use that speeds up the sklearn", "calculation in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() dominant = centroid[np.argmax(counts)]", "normalise sorted_labels = np.arange(0, len(np.unique(labels)) + 1) (hist, _) = np.histogram(labels, bins=sorted_labels) hist", "- start print(f\"fast_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def 
visualise_colours(labels,", "return int(max(0, min(x, 255))) def get_rgb_colour(img_url, debug=False): ''' Method to print hex sting", "print(f\"fast_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid, total_time def visualise_colours(labels, centroids): '''", "timing: tic = time.perf_counter() cluster = KMeans(n_clusters=colours, n_init=3, max_iter=10, tol=0.001) cluster.fit(img) if timing:", "cv2 from sklearn.cluster import KMeans from skimage import io from skimage.transform import rescale", "timing: tic = time.perf_counter() dominant = centroid[np.argmax(counts)] if timing: toc = time.perf_counter() print(f\"Dominant", "timing: tic = time.perf_counter() n_colours = colours criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200,", "* 300) cv2.rectangle(rect, (int(start), 0), (int(end), 50), colour.astype(\"uint8\").tolist(), -1) start = end return", "print(f\"KMeans calculation in {toc - tic:0.2f}s\") if timing: tic = time.perf_counter() dominant =", "{clamp(colour[0])}] \", \"{:0.2f}%\".format(percent * 100)) end = start + (percent * 300) cv2.rectangle(rect,", "dominant = centroid[np.argmax(counts)] if timing: toc = time.perf_counter() print(f\"Dominant selection in {toc -", "1) (hist, _) = np.histogram(labels, bins=sorted_labels) hist = hist.astype(\"float\") hist /= hist.sum() #", "_, counts = np.unique(labels, return_counts=True) if timing: toc = time.perf_counter() print(f\"KMeans calculation in", "= io.imread(img_url) if scale != 1.0: img = rescale(img, scale, multichannel=True) img =", "if timing: toc = time.perf_counter() print(f\"Dominant selection in {toc - tic:0.2f}s\") if timing:", "time.perf_counter() n_colours = colours criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1) flags =", "= 0 for (percent, colour) in colours: print(f\"[{clamp(colour[0])}, {clamp(colour[0])}, {clamp(colour[0])}] \", \"{:0.2f}%\".format(percent *", "hist.astype(\"float\") hist /= hist.sum() # Create 
frequency rect and iterate through each cluster's", "tic = time.perf_counter() img = io.imread(img_url) if scale != 1.0: img = rescale(img,", "centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter() percent = [] _, counts", "dtype=np.uint8) colours = sorted(zip(hist, centroids)) start = 0 for (percent, colour) in colours:", "dominant_colour[1] b = dominant_colour[2] if debug: hex_str = \"#{0:02x}{1:02x}{2:02x}\".format(clamp(r), clamp(g), clamp(b)) print(f'{hex_str}') rgb_colour", ".1) flags = cv2.KMEANS_RANDOM_CENTERS _, labels, centroid = cv2.kmeans( pixels, n_colours, None, criteria,", "colours in an image ''' # Get the number of different clusters, create", "sklearn.cluster import KMeans from skimage import io from skimage.transform import rescale def cv2_dominant_colour(img_url,", "total_time = end - start print(f\"cv2_dominant_colour execution in {total_time:0.2f}s\") return dominant, labels, centroid,", "list(labels) centroid = cluster.cluster_centers_ if timing: tic = time.perf_counter() percent = [] for", "timing: start = time.perf_counter() tic = time.perf_counter() img = io.imread(img_url) pixels = np.float32(img.reshape(-1," ]
[ "import json, getopt, sys import numpy as np from striped.client import CouchBaseBackend Usage", "-n means show data as numpy array of given dtype and shape python", "= CouchBaseBackend(Bucket) if json_data: data = cb[Key].json out = json.dumps(data, indent=4, sort_keys=True, separators=(',',", "= \"-j\" in opts data = None if \"-d\" in opts: data =", "open(out_file, \"w\") if out_file else sys.stdout out_file.write(out) else: data = cb[Key].data if out_file:", "= sys.argv[2:] if cmd == \"get\": show_as_np = False opts, args = getopt.getopt(args,", "data = open(opts[\"-f\"], \"rb\").read() if json_in: data = json.loads(data) cb = CouchBaseBackend(Bucket) if", "\"wb\").write(data) elif dtype: data = np.frombuffer(data, dtype=dtype) print(data.shape, data.dtype, data) else: print(len(data), repr(data[:100]))", "if cmd == \"get\": show_as_np = False opts, args = getopt.getopt(args, \"d:jo:\") opts", "= \"-j\" in opts Bucket, Key = args cb = CouchBaseBackend(Bucket) if json_data:", "': ')) out_file = open(out_file, \"w\") if out_file else sys.stdout out_file.write(out) else: data", "json_data = \"-j\" in opts Bucket, Key = args cb = CouchBaseBackend(Bucket) if", "Usage = \"\"\" python cb.py get [-j] [-o <file>|-d <dtype>] <bucket> <key> -n", "<bucket> <key> \"\"\" if not sys.argv[1:]: print(Usage) sys.exit(1) cmd = sys.argv[1] args =", "data.dtype, data) else: print(len(data), repr(data[:100])) elif cmd == \"put\": opts, args = getopt.getopt(args,", "opts Bucket, Key = args cb = CouchBaseBackend(Bucket) if json_data: data = cb[Key].json", "json_in = \"-j\" in opts data = None if \"-d\" in opts: data", "numpy as np from striped.client import CouchBaseBackend Usage = \"\"\" python cb.py get", "separators=(',', ': ')) out_file = open(out_file, \"w\") if out_file else sys.stdout out_file.write(out) else:", "not sys.argv[1:]: print(Usage) sys.exit(1) cmd = sys.argv[1] args = sys.argv[2:] if cmd ==", "in opts data = None if \"-d\" in opts: data = opts[\"-d\"] else:", 
"print(data.shape, data.dtype, data) else: print(len(data), repr(data[:100])) elif cmd == \"put\": opts, args =", "and shape python cb.py put [-j] [-f <file>|-d <data>] <bucket> <key> \"\"\" if", "CouchBaseBackend(Bucket) if json_data: data = cb[Key].json out = json.dumps(data, indent=4, sort_keys=True, separators=(',', ':", "data = None if \"-d\" in opts: data = opts[\"-d\"] else: data =", "<dtype>] <bucket> <key> -n means show data as numpy array of given dtype", "\"w\") if out_file else sys.stdout out_file.write(out) else: data = cb[Key].data if out_file: open(out_file,", "None if \"-d\" in opts: data = opts[\"-d\"] else: data = open(opts[\"-f\"], \"rb\").read()", "= dict(opts) dtype = opts.get(\"-d\") out_file = opts.get(\"-o\") json_data = \"-j\" in opts", "= args json_in = \"-j\" in opts data = None if \"-d\" in", "\"get\": show_as_np = False opts, args = getopt.getopt(args, \"d:jo:\") opts = dict(opts) dtype", "\"d:jo:\") opts = dict(opts) dtype = opts.get(\"-d\") out_file = opts.get(\"-o\") json_data = \"-j\"", "indent=4, sort_keys=True, separators=(',', ': ')) out_file = open(out_file, \"w\") if out_file else sys.stdout", "[-j] [-f <file>|-d <data>] <bucket> <key> \"\"\" if not sys.argv[1:]: print(Usage) sys.exit(1) cmd", "opts, args = getopt.getopt(args, \"d:jo:\") opts = dict(opts) dtype = opts.get(\"-d\") out_file =", "\"-j\" in opts data = None if \"-d\" in opts: data = opts[\"-d\"]", "Key = args json_in = \"-j\" in opts data = None if \"-d\"", "out_file = open(out_file, \"w\") if out_file else sys.stdout out_file.write(out) else: data = cb[Key].data", "opts data = None if \"-d\" in opts: data = opts[\"-d\"] else: data", "dtype = opts.get(\"-d\") out_file = opts.get(\"-o\") json_data = \"-j\" in opts Bucket, Key", "getopt, sys import numpy as np from striped.client import CouchBaseBackend Usage = \"\"\"", "getopt.getopt(args, \"d:jo:\") opts = dict(opts) dtype = opts.get(\"-d\") out_file = opts.get(\"-o\") json_data =", "if json_in: data = 
json.loads(data) cb = CouchBaseBackend(Bucket) if json_in: cb[Key].json = data", "print(len(data), repr(data[:100])) elif cmd == \"put\": opts, args = getopt.getopt(args, \"jf:d:\") opts =", "in opts: data = opts[\"-d\"] else: data = open(opts[\"-f\"], \"rb\").read() if json_in: data", "\"rb\").read() if json_in: data = json.loads(data) cb = CouchBaseBackend(Bucket) if json_in: cb[Key].json =", "get [-j] [-o <file>|-d <dtype>] <bucket> <key> -n means show data as numpy", "sys.exit(1) cmd = sys.argv[1] args = sys.argv[2:] if cmd == \"get\": show_as_np =", "= opts.get(\"-d\") out_file = opts.get(\"-o\") json_data = \"-j\" in opts Bucket, Key =", "= opts.get(\"-o\") json_data = \"-j\" in opts Bucket, Key = args cb =", "dict(opts) dtype = opts.get(\"-d\") out_file = opts.get(\"-o\") json_data = \"-j\" in opts Bucket,", "repr(data[:100])) elif cmd == \"put\": opts, args = getopt.getopt(args, \"jf:d:\") opts = dict(opts)", "array of given dtype and shape python cb.py put [-j] [-f <file>|-d <data>]", "if json_data: data = cb[Key].json out = json.dumps(data, indent=4, sort_keys=True, separators=(',', ': '))", "out = json.dumps(data, indent=4, sort_keys=True, separators=(',', ': ')) out_file = open(out_file, \"w\") if", "dict(opts) Bucket, Key = args json_in = \"-j\" in opts data = None", "else: data = open(opts[\"-f\"], \"rb\").read() if json_in: data = json.loads(data) cb = CouchBaseBackend(Bucket)", "= np.frombuffer(data, dtype=dtype) print(data.shape, data.dtype, data) else: print(len(data), repr(data[:100])) elif cmd == \"put\":", "opts.get(\"-o\") json_data = \"-j\" in opts Bucket, Key = args cb = CouchBaseBackend(Bucket)", "<key> \"\"\" if not sys.argv[1:]: print(Usage) sys.exit(1) cmd = sys.argv[1] args = sys.argv[2:]", "<filename>ingest/tools/cb.py import json, getopt, sys import numpy as np from striped.client import CouchBaseBackend", "import CouchBaseBackend Usage = \"\"\" python cb.py get [-j] [-o <file>|-d <dtype>] <bucket>", "shape python cb.py put [-j] [-f 
<file>|-d <data>] <bucket> <key> \"\"\" if not", "<key> -n means show data as numpy array of given dtype and shape", "= sys.argv[1] args = sys.argv[2:] if cmd == \"get\": show_as_np = False opts,", "as numpy array of given dtype and shape python cb.py put [-j] [-f", "args json_in = \"-j\" in opts data = None if \"-d\" in opts:", "of given dtype and shape python cb.py put [-j] [-f <file>|-d <data>] <bucket>", "\"-j\" in opts Bucket, Key = args cb = CouchBaseBackend(Bucket) if json_data: data", "cb[Key].data if out_file: open(out_file, \"wb\").write(data) elif dtype: data = np.frombuffer(data, dtype=dtype) print(data.shape, data.dtype,", "\"\"\" if not sys.argv[1:]: print(Usage) sys.exit(1) cmd = sys.argv[1] args = sys.argv[2:] if", "from striped.client import CouchBaseBackend Usage = \"\"\" python cb.py get [-j] [-o <file>|-d", "False opts, args = getopt.getopt(args, \"d:jo:\") opts = dict(opts) dtype = opts.get(\"-d\") out_file", "Bucket, Key = args json_in = \"-j\" in opts data = None if", "[-f <file>|-d <data>] <bucket> <key> \"\"\" if not sys.argv[1:]: print(Usage) sys.exit(1) cmd =", "\"put\": opts, args = getopt.getopt(args, \"jf:d:\") opts = dict(opts) Bucket, Key = args", "cmd = sys.argv[1] args = sys.argv[2:] if cmd == \"get\": show_as_np = False", "= cb[Key].json out = json.dumps(data, indent=4, sort_keys=True, separators=(',', ': ')) out_file = open(out_file,", "out_file: open(out_file, \"wb\").write(data) elif dtype: data = np.frombuffer(data, dtype=dtype) print(data.shape, data.dtype, data) else:", "cb[Key].json out = json.dumps(data, indent=4, sort_keys=True, separators=(',', ': ')) out_file = open(out_file, \"w\")", "opts = dict(opts) dtype = opts.get(\"-d\") out_file = opts.get(\"-o\") json_data = \"-j\" in", "np from striped.client import CouchBaseBackend Usage = \"\"\" python cb.py get [-j] [-o", "opts = dict(opts) Bucket, Key = args json_in = \"-j\" in opts data", "= False opts, args = getopt.getopt(args, \"d:jo:\") opts = dict(opts) dtype = 
opts.get(\"-d\")", "open(opts[\"-f\"], \"rb\").read() if json_in: data = json.loads(data) cb = CouchBaseBackend(Bucket) if json_in: cb[Key].json", "elif cmd == \"put\": opts, args = getopt.getopt(args, \"jf:d:\") opts = dict(opts) Bucket,", "Key = args cb = CouchBaseBackend(Bucket) if json_data: data = cb[Key].json out =", "args cb = CouchBaseBackend(Bucket) if json_data: data = cb[Key].json out = json.dumps(data, indent=4,", "data) else: print(len(data), repr(data[:100])) elif cmd == \"put\": opts, args = getopt.getopt(args, \"jf:d:\")", "\"\"\" python cb.py get [-j] [-o <file>|-d <dtype>] <bucket> <key> -n means show", "= getopt.getopt(args, \"jf:d:\") opts = dict(opts) Bucket, Key = args json_in = \"-j\"", "json.loads(data) cb = CouchBaseBackend(Bucket) if json_in: cb[Key].json = data else: cb[Key].data = data", "CouchBaseBackend Usage = \"\"\" python cb.py get [-j] [-o <file>|-d <dtype>] <bucket> <key>", "sys.argv[1] args = sys.argv[2:] if cmd == \"get\": show_as_np = False opts, args", "data as numpy array of given dtype and shape python cb.py put [-j]", "json.dumps(data, indent=4, sort_keys=True, separators=(',', ': ')) out_file = open(out_file, \"w\") if out_file else", "sort_keys=True, separators=(',', ': ')) out_file = open(out_file, \"w\") if out_file else sys.stdout out_file.write(out)", "np.frombuffer(data, dtype=dtype) print(data.shape, data.dtype, data) else: print(len(data), repr(data[:100])) elif cmd == \"put\": opts,", "open(out_file, \"wb\").write(data) elif dtype: data = np.frombuffer(data, dtype=dtype) print(data.shape, data.dtype, data) else: print(len(data),", "if out_file: open(out_file, \"wb\").write(data) elif dtype: data = np.frombuffer(data, dtype=dtype) print(data.shape, data.dtype, data)", "= json.dumps(data, indent=4, sort_keys=True, separators=(',', ': ')) out_file = open(out_file, \"w\") if out_file", "else: print(len(data), repr(data[:100])) elif cmd == \"put\": opts, args = getopt.getopt(args, \"jf:d:\") opts", "= opts[\"-d\"] 
else: data = open(opts[\"-f\"], \"rb\").read() if json_in: data = json.loads(data) cb", "given dtype and shape python cb.py put [-j] [-f <file>|-d <data>] <bucket> <key>", "striped.client import CouchBaseBackend Usage = \"\"\" python cb.py get [-j] [-o <file>|-d <dtype>]", "json_in: data = json.loads(data) cb = CouchBaseBackend(Bucket) if json_in: cb[Key].json = data else:", "= open(out_file, \"w\") if out_file else sys.stdout out_file.write(out) else: data = cb[Key].data if", "sys.argv[2:] if cmd == \"get\": show_as_np = False opts, args = getopt.getopt(args, \"d:jo:\")", "out_file.write(out) else: data = cb[Key].data if out_file: open(out_file, \"wb\").write(data) elif dtype: data =", "opts[\"-d\"] else: data = open(opts[\"-f\"], \"rb\").read() if json_in: data = json.loads(data) cb =", "data = np.frombuffer(data, dtype=dtype) print(data.shape, data.dtype, data) else: print(len(data), repr(data[:100])) elif cmd ==", "python cb.py get [-j] [-o <file>|-d <dtype>] <bucket> <key> -n means show data", "cb.py put [-j] [-f <file>|-d <data>] <bucket> <key> \"\"\" if not sys.argv[1:]: print(Usage)", "= open(opts[\"-f\"], \"rb\").read() if json_in: data = json.loads(data) cb = CouchBaseBackend(Bucket) if json_in:", "= json.loads(data) cb = CouchBaseBackend(Bucket) if json_in: cb[Key].json = data else: cb[Key].data =", "data = cb[Key].data if out_file: open(out_file, \"wb\").write(data) elif dtype: data = np.frombuffer(data, dtype=dtype)", "data = opts[\"-d\"] else: data = open(opts[\"-f\"], \"rb\").read() if json_in: data = json.loads(data)", "dtype and shape python cb.py put [-j] [-f <file>|-d <data>] <bucket> <key> \"\"\"", "if \"-d\" in opts: data = opts[\"-d\"] else: data = open(opts[\"-f\"], \"rb\").read() if", "= args cb = CouchBaseBackend(Bucket) if json_data: data = cb[Key].json out = json.dumps(data,", "as np from striped.client import CouchBaseBackend Usage = \"\"\" python cb.py get [-j]", "means show data as numpy array of given dtype and shape python cb.py", 
"dtype: data = np.frombuffer(data, dtype=dtype) print(data.shape, data.dtype, data) else: print(len(data), repr(data[:100])) elif cmd", "= None if \"-d\" in opts: data = opts[\"-d\"] else: data = open(opts[\"-f\"],", "= getopt.getopt(args, \"d:jo:\") opts = dict(opts) dtype = opts.get(\"-d\") out_file = opts.get(\"-o\") json_data", "== \"put\": opts, args = getopt.getopt(args, \"jf:d:\") opts = dict(opts) Bucket, Key =", "json_data: data = cb[Key].json out = json.dumps(data, indent=4, sort_keys=True, separators=(',', ': ')) out_file", "sys import numpy as np from striped.client import CouchBaseBackend Usage = \"\"\" python", "args = getopt.getopt(args, \"d:jo:\") opts = dict(opts) dtype = opts.get(\"-d\") out_file = opts.get(\"-o\")", "out_file = opts.get(\"-o\") json_data = \"-j\" in opts Bucket, Key = args cb", "out_file else sys.stdout out_file.write(out) else: data = cb[Key].data if out_file: open(out_file, \"wb\").write(data) elif", "opts: data = opts[\"-d\"] else: data = open(opts[\"-f\"], \"rb\").read() if json_in: data =", "= cb[Key].data if out_file: open(out_file, \"wb\").write(data) elif dtype: data = np.frombuffer(data, dtype=dtype) print(data.shape,", "put [-j] [-f <file>|-d <data>] <bucket> <key> \"\"\" if not sys.argv[1:]: print(Usage) sys.exit(1)", "dtype=dtype) print(data.shape, data.dtype, data) else: print(len(data), repr(data[:100])) elif cmd == \"put\": opts, args", "else sys.stdout out_file.write(out) else: data = cb[Key].data if out_file: open(out_file, \"wb\").write(data) elif dtype:", "sys.stdout out_file.write(out) else: data = cb[Key].data if out_file: open(out_file, \"wb\").write(data) elif dtype: data", "numpy array of given dtype and shape python cb.py put [-j] [-f <file>|-d", "<file>|-d <data>] <bucket> <key> \"\"\" if not sys.argv[1:]: print(Usage) sys.exit(1) cmd = sys.argv[1]", "<data>] <bucket> <key> \"\"\" if not sys.argv[1:]: print(Usage) sys.exit(1) cmd = sys.argv[1] args", "Bucket, Key = args cb = CouchBaseBackend(Bucket) if 
json_data: data = cb[Key].json out", "= dict(opts) Bucket, Key = args json_in = \"-j\" in opts data =", "<bucket> <key> -n means show data as numpy array of given dtype and", "opts, args = getopt.getopt(args, \"jf:d:\") opts = dict(opts) Bucket, Key = args json_in", "')) out_file = open(out_file, \"w\") if out_file else sys.stdout out_file.write(out) else: data =", "print(Usage) sys.exit(1) cmd = sys.argv[1] args = sys.argv[2:] if cmd == \"get\": show_as_np", "args = getopt.getopt(args, \"jf:d:\") opts = dict(opts) Bucket, Key = args json_in =", "== \"get\": show_as_np = False opts, args = getopt.getopt(args, \"d:jo:\") opts = dict(opts)", "elif dtype: data = np.frombuffer(data, dtype=dtype) print(data.shape, data.dtype, data) else: print(len(data), repr(data[:100])) elif", "show_as_np = False opts, args = getopt.getopt(args, \"d:jo:\") opts = dict(opts) dtype =", "data = json.loads(data) cb = CouchBaseBackend(Bucket) if json_in: cb[Key].json = data else: cb[Key].data", "data = cb[Key].json out = json.dumps(data, indent=4, sort_keys=True, separators=(',', ': ')) out_file =", "in opts Bucket, Key = args cb = CouchBaseBackend(Bucket) if json_data: data =", "[-o <file>|-d <dtype>] <bucket> <key> -n means show data as numpy array of", "args = sys.argv[2:] if cmd == \"get\": show_as_np = False opts, args =", "\"jf:d:\") opts = dict(opts) Bucket, Key = args json_in = \"-j\" in opts", "opts.get(\"-d\") out_file = opts.get(\"-o\") json_data = \"-j\" in opts Bucket, Key = args", "python cb.py put [-j] [-f <file>|-d <data>] <bucket> <key> \"\"\" if not sys.argv[1:]:", "sys.argv[1:]: print(Usage) sys.exit(1) cmd = sys.argv[1] args = sys.argv[2:] if cmd == \"get\":", "import numpy as np from striped.client import CouchBaseBackend Usage = \"\"\" python cb.py", "show data as numpy array of given dtype and shape python cb.py put", "[-j] [-o <file>|-d <dtype>] <bucket> <key> -n means show data as numpy array", "cmd == \"put\": opts, args = getopt.getopt(args, \"jf:d:\") opts = 
dict(opts) Bucket, Key", "json, getopt, sys import numpy as np from striped.client import CouchBaseBackend Usage =", "cmd == \"get\": show_as_np = False opts, args = getopt.getopt(args, \"d:jo:\") opts =", "<file>|-d <dtype>] <bucket> <key> -n means show data as numpy array of given", "else: data = cb[Key].data if out_file: open(out_file, \"wb\").write(data) elif dtype: data = np.frombuffer(data,", "if not sys.argv[1:]: print(Usage) sys.exit(1) cmd = sys.argv[1] args = sys.argv[2:] if cmd", "cb = CouchBaseBackend(Bucket) if json_data: data = cb[Key].json out = json.dumps(data, indent=4, sort_keys=True,", "\"-d\" in opts: data = opts[\"-d\"] else: data = open(opts[\"-f\"], \"rb\").read() if json_in:", "cb.py get [-j] [-o <file>|-d <dtype>] <bucket> <key> -n means show data as", "if out_file else sys.stdout out_file.write(out) else: data = cb[Key].data if out_file: open(out_file, \"wb\").write(data)", "= \"\"\" python cb.py get [-j] [-o <file>|-d <dtype>] <bucket> <key> -n means", "getopt.getopt(args, \"jf:d:\") opts = dict(opts) Bucket, Key = args json_in = \"-j\" in" ]
[ "# Creates the final rule return structure # def phase_coda(ctx, g): return struct(", "coda # # Creates the final rule return structure # def phase_coda(ctx, g):", "final rule return structure # def phase_coda(ctx, g): return struct( java = g.ijinfo.intellij_info,", "structure # def phase_coda(ctx, g): return struct( java = g.ijinfo.intellij_info, providers = g.out.providers,", "# # PHASE: coda # # Creates the final rule return structure #", "# PHASE: coda # # Creates the final rule return structure # def", "rule return structure # def phase_coda(ctx, g): return struct( java = g.ijinfo.intellij_info, providers", "# def phase_coda(ctx, g): return struct( java = g.ijinfo.intellij_info, providers = g.out.providers, )", "Creates the final rule return structure # def phase_coda(ctx, g): return struct( java", "return structure # def phase_coda(ctx, g): return struct( java = g.ijinfo.intellij_info, providers =", "# # Creates the final rule return structure # def phase_coda(ctx, g): return", "the final rule return structure # def phase_coda(ctx, g): return struct( java =", "PHASE: coda # # Creates the final rule return structure # def phase_coda(ctx," ]
[ "Tuple[str, str, bool]: \"\"\" Prepare the normal and eager payloads and decide if", "ScrapliCfgResponse: \"\"\" Handle post \"clear_config_sessions\" operations for parity between sync and async Args:", "session name attributes -- when these are \"empty\" we know there is no", "self._prepare_config_payloads(config=config) register_config_session = False if not self.config_session_name: self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session name", "session which we do not want config = re.sub(pattern=END_PATTERN, repl=\"!\", string=config) # find", "object to update w/ get results Raises: N/A \"\"\" self.logger.info(\"clear_config_sessions requested\") response =", "and config session name\") self.candidate_config = \"\" self.config_session_name = \"\" def _normalize_source_candidate_configs(self, source_config:", "not want config = re.sub(pattern=END_PATTERN, repl=\"!\", string=config) # find all sections that need", "_reset_config_session(self) -> None: \"\"\" Reset config session info Resets the candidate config and", "objects from fetching the version Returns: ScrapliCfgResponse: response object containing string of the", "device and \"eager\" mode config elements to send to the device (things like", "N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses) if response.failed: msg = \"failed to clear device configuration session(s)\"", "self, response: ScrapliCfgResponse, scrapli_responses: Iterable[Response], ) -> ScrapliCfgResponse: \"\"\" Handle post \"clear_config_sessions\" operations", "tuple: tuple containing \"normal\" config elements to send to the device and \"eager\"", "config of the source config store Returns: ScrapliCfgDiff: scrapli cfg diff object Raises:", "diff object Raises: N/A \"\"\" self.logger.debug(\"normalizing source and candidate configs for diff object\")", "\"\"\" Handle pre \"clear_config_sessions\" operations for parity between sync and async 
Args: N/A", "\"\"\" Handle post \"clear_config_sessions\" operations for parity between sync and async Args: response:", "register a config session Args: config: candidate config to load Returns: tuple: tuple", "Iterable, List, Tuple, Union from scrapli.driver import AsyncNetworkDriver, NetworkDriver from scrapli.response import Response", "string=config) for eager_section in eager_config: config = config.replace(eager_section, \"!\") joined_eager_config = \"\\n\".join(captured_section for", "-> Tuple[str, str]: \"\"\" Prepare a configuration so it can be nicely sent", "session name\") self.candidate_config = \"\" self.config_session_name = \"\" def _normalize_source_candidate_configs(self, source_config: str) ->", "the candidate config and config session name attributes -- when these are \"empty\"", "config if present - if its present it will drop scrapli out #", "tuple containing \"normal\" config elements to send to the device and \"eager\" mode", "easily diff them Args: source_config: current config of the source config store Returns:", "\"!\") joined_eager_config = \"\\n\".join(captured_section for captured_section in eager_config) return config, joined_eager_config def _prepare_load_config_session_and_payload(self,", "= False if not self.config_session_name: self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session name will be", "show version command Returns: str: device version string Raises: N/A \"\"\" version_string_search =", "Return command to use to get config based on the provided source Args:", "if line) candidate_config = re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\" ) candidate_config = \"\\n\".join(line for", "host=self.conn.host, raise_for_status_exception=ScrapliCfgException ) return response def _post_clear_config_sessions( self, response: ScrapliCfgResponse, scrapli_responses: Iterable[Response], )", "repl=\"!\", string=config) # find all 
sections that need to be \"eagerly\" sent eager_config", "that require scrapli \"eager=True\"), and lastly a bool indicating if the config session", "and config session name attributes -- when these are \"empty\" we know there", "command to use to get config based on the provided source Args: source:", "N/A \"\"\" version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output) if not version_string_search: return \"\" version_string =", "\"\"\" Prepare the normal and eager payloads and decide if we need to", "_prepare_config_payloads(config: str) -> Tuple[str, str]: \"\"\" Prepare a configuration so it can be", "between sync and async Args: response: response object to update scrapli_responses: list of", "not self.config_session_name: self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session name will be '{self.config_session_name}'\") register_config_session =", "we dont modify the user provided candidate config which can totally have #", "candidate_config = re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\" ) candidate_config = \"\\n\".join(line for line in", "provided source Args: source: name of the config source, generally running|startup Returns: str:", "\"\" version_string = version_string_search.group(0) or \"\" return version_string @staticmethod def _parse_config_sessions(device_output: str) ->", "version string Raises: N/A \"\"\" version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output) if not version_string_search: return", "candidate config which can totally have # those comment lines - we only", "( BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, ) from scrapli_cfg.response import ScrapliCfgResponse CONFIG_SOURCES = [", "we do not want config = re.sub(pattern=END_PATTERN, repl=\"!\", string=config) # find all sections", "version_string_search.group(0) or \"\" return version_string 
@staticmethod def _parse_config_sessions(device_output: str) -> List[str]: \"\"\" Parse", "(things like banners/macro that require scrapli \"eager=True\"), and lastly a bool indicating if", "and candidate configs -- this is only done # here pre-diff, so we", "name of the config source, generally running|startup Returns: str: command to use to", "diff object\") # Remove all comment lines from both the source and candidate", "# of the config session which we do not want config = re.sub(pattern=END_PATTERN,", "lines config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config) # remove \"end\" at the end of", "string out of device output Args: device_output: output from show version command Returns:", "command to use to fetch the requested config Raises: N/A \"\"\" if source", "= re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config) # remove \"end\" at the end of config if", "# attached to interfaces and the stuff will remain source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config,", "eager_config) return config, joined_eager_config def _prepare_load_config_session_and_payload(self, config: str) -> Tuple[str, str, bool]: \"\"\"", "pre \"clear_config_sessions\" operations for parity between sync and async Args: N/A Returns: ScrapliCfgResponse:", "scrapli cfg diff object Raises: N/A \"\"\" self.logger.debug(\"normalizing source and candidate configs for", "string=config) # remove \"end\" at the end of config if present - if", "register_config_session def _reset_config_session(self) -> None: \"\"\" Reset config session info Resets the candidate", "import ( BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, ) from scrapli_cfg.response import ScrapliCfgResponse CONFIG_SOURCES =", "out of device output Args: device_output: output from show version command Returns: str:", "\"\" return version_string @staticmethod def _parse_config_sessions(device_output: str) -> List[str]: \"\"\" Parse config 
session", "Returns: tuple: tuple of \"normal\" config lines and \"eager\" config lines Raises: N/A", "# find all sections that need to be \"eagerly\" sent eager_config = re.findall(pattern=BANNER_PATTERN,", "config session name\") self.candidate_config = \"\" self.config_session_name = \"\" def _normalize_source_candidate_configs(self, source_config: str)", "scrapli_responses: Iterable[Response], ) -> ScrapliCfgResponse: \"\"\" Handle post \"clear_config_sessions\" operations for parity between", "config elements to send to the device and \"eager\" mode config elements to", "have # those comment lines - we only remove \"global\" (top level) comments", "str: device version string Raises: N/A \"\"\" version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output) if not", "\"\" self.config_session_name = \"\" def _normalize_source_candidate_configs(self, source_config: str) -> Tuple[str, str]: \"\"\" Normalize", "config store Returns: ScrapliCfgDiff: scrapli cfg diff object Raises: N/A \"\"\" self.logger.debug(\"normalizing source", "config elements to send to the device (things like banners/macro that require scrapli", "to fetch the requested config Raises: N/A \"\"\" if source == \"running\": return", "_post_clear_config_sessions( self, response: ScrapliCfgResponse, scrapli_responses: Iterable[Response], ) -> ScrapliCfgResponse: \"\"\" Handle post \"clear_config_sessions\"", "sessions @staticmethod def _get_config_command(source: str) -> str: \"\"\" Return command to use to", "as the `result` attribute Raises: N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses) if response.failed: msg = \"failed", "\"\"\" self.logger.debug(\"normalizing source and candidate configs for diff object\") # Remove all comment", "be nicely sent to the device via scrapli Args: config: configuration to prep", "of the version as the `result` attribute Raises: N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses) if response.failed:", 
"totally have # those comment lines - we only remove \"global\" (top level)", "return version_string @staticmethod def _parse_config_sessions(device_output: str) -> List[str]: \"\"\" Parse config session names", "line in candidate_config.splitlines() if line) return source_config, candidate_config def _pre_clear_config_sessions(self) -> ScrapliCfgResponse: \"\"\"", "json.loads(device_output) except json.JSONDecodeError: return [] sessions = list(config_session_dict.get(\"sessions\", {})) return sessions @staticmethod def", "generally running|startup Returns: str: command to use to fetch the requested config Raises:", "Args: config: candidate config to load Returns: tuple: tuple containing \"normal\" config elements", "self.logger.debug(\"resetting candidate config and config session name\") self.candidate_config = \"\" self.config_session_name = \"\"", "config = config.replace(eager_section, \"!\") joined_eager_config = \"\\n\".join(captured_section for captured_section in eager_config) return config,", "import ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns import ( BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, ) from scrapli_cfg.response", "lines and \"eager\" config lines Raises: N/A \"\"\" # remove comment lines config", "source_config = \"\\n\".join(line for line in source_config.splitlines() if line) candidate_config = re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN,", "can be nicely sent to the device via scrapli Args: config: configuration to", "def _parse_version(device_output: str) -> str: \"\"\" Parse version string out of device output", "provided candidate config which can totally have # those comment lines - we", "be '{self.config_session_name}'\") register_config_session = True return config, eager_config, register_config_session def _reset_config_session(self) -> None:", "source and candidate configs for diff object\") # Remove all comment lines from", "of scrapli response objects from fetching the 
version Returns: ScrapliCfgResponse: response object containing", "containing \"normal\" config elements to send to the device and \"eager\" mode config", "msg = \"failed to clear device configuration session(s)\" self.logger.critical(msg) response.result = msg else:", "version command Returns: list[str]: config session names Raises: N/A \"\"\" try: config_session_dict =", "of the config source, generally running|startup Returns: str: command to use to fetch", "to send to the device and \"eager\" mode config elements to send to", "= True return config, eager_config, register_config_session def _reset_config_session(self) -> None: \"\"\" Reset config", "= re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\") source_config = \"\\n\".join(line for line in source_config.splitlines() if line)", "Returns: str: device version string Raises: N/A \"\"\" version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output) if", "bool indicating if the config session needs to be registered on the device", "self.candidate_config = \"\" self.config_session_name = \"\" def _normalize_source_candidate_configs(self, source_config: str) -> Tuple[str, str]:", "interfaces and the stuff will remain source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\") source_config =", "config source, generally running|startup Returns: str: command to use to fetch the requested", "so it can be nicely sent to the device via scrapli Args: config:", "Raises: N/A \"\"\" # remove comment lines config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config) #", "get results Raises: N/A \"\"\" self.logger.info(\"clear_config_sessions requested\") response = ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException )", "startup-config\" @staticmethod def _prepare_config_payloads(config: str) -> Tuple[str, str]: \"\"\" Prepare a configuration so", "\"\"\" try: config_session_dict = 
json.loads(device_output) except json.JSONDecodeError: return [] sessions = list(config_session_dict.get(\"sessions\", {}))", "try: config_session_dict = json.loads(device_output) except json.JSONDecodeError: return [] sessions = list(config_session_dict.get(\"sessions\", {})) return", "out of device output Args: device_output: output from show version command Returns: list[str]:", "name\") self.candidate_config = \"\" self.config_session_name = \"\" def _normalize_source_candidate_configs(self, source_config: str) -> Tuple[str,", "present it will drop scrapli out # of the config session which we", "str: \"\"\" Parse version string out of device output Args: device_output: output from", "(top level) comments though... user comments # attached to interfaces and the stuff", "if source == \"running\": return \"show running-config\" return \"show startup-config\" @staticmethod def _prepare_config_payloads(config:", "None: \"\"\" Reset config session info Resets the candidate config and config session", "comment lines from both the source and candidate configs -- this is only", "\"eager=True\"), and lastly a bool indicating if the config session needs to be", "and candidate configs for diff object\") # Remove all comment lines from both", "_normalize_source_candidate_configs(self, source_config: str) -> Tuple[str, str]: \"\"\" Normalize candidate config and source config", "\"\"\" version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output) if not version_string_search: return \"\" version_string = version_string_search.group(0)", "response: response object to update scrapli_responses: list of scrapli response objects from fetching", "comments though... 
user comments # attached to interfaces and the stuff will remain", "in source_config.splitlines() if line) candidate_config = re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\" ) candidate_config =", "logger: LoggerAdapter config_sources: List[str] config_session_name: str candidate_config: str @staticmethod def _parse_version(device_output: str) ->", "device_output: output from show version command Returns: str: device version string Raises: N/A", "response.failed: msg = \"failed to clear device configuration session(s)\" self.logger.critical(msg) response.result = msg", "the config session which we do not want config = re.sub(pattern=END_PATTERN, repl=\"!\", string=config)", "import json import re from datetime import datetime from logging import LoggerAdapter from", "all comment lines from both the source and candidate configs -- this is", "candidate configs -- this is only done # here pre-diff, so we dont", "session name will be '{self.config_session_name}'\") register_config_session = True return config, eager_config, register_config_session def", "def _prepare_load_config_session_and_payload(self, config: str) -> Tuple[str, str, bool]: \"\"\" Prepare the normal and", "this is only done # here pre-diff, so we dont modify the user", "tuple: tuple of \"normal\" config lines and \"eager\" config lines Raises: N/A \"\"\"", "name attributes -- when these are \"empty\" we know there is no current", "it can be nicely sent to the device via scrapli Args: config: configuration", "\"\"\" self.logger.debug(\"resetting candidate config and config session name\") self.candidate_config = \"\" self.config_session_name =", "for line in candidate_config.splitlines() if line) return source_config, candidate_config def _pre_clear_config_sessions(self) -> ScrapliCfgResponse:", "Args: N/A Returns: None Raises: N/A \"\"\" self.logger.debug(\"resetting candidate config and config session", "Iterable[Response], ) -> ScrapliCfgResponse: \"\"\" Handle 
post \"clear_config_sessions\" operations for parity between sync", "self.logger.debug(\"normalizing source and candidate configs for diff object\") # Remove all comment lines", "eager_config: config = config.replace(eager_section, \"!\") joined_eager_config = \"\\n\".join(captured_section for captured_section in eager_config) return", "- we only remove \"global\" (top level) comments though... user comments # attached", "comment lines config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config) # remove \"end\" at the end", "in eager_config) return config, joined_eager_config def _prepare_load_config_session_and_payload(self, config: str) -> Tuple[str, str, bool]:", "config, joined_eager_config def _prepare_load_config_session_and_payload(self, config: str) -> Tuple[str, str, bool]: \"\"\" Prepare the", "\"\"\" config, eager_config = self._prepare_config_payloads(config=config) register_config_session = False if not self.config_session_name: self.config_session_name =", "object\") # Remove all comment lines from both the source and candidate configs", "repl=\"!\", string=config) # remove \"end\" at the end of config if present -", "can totally have # those comment lines - we only remove \"global\" (top", "those comment lines - we only remove \"global\" (top level) comments though... 
user", "candidate config and config session name\") self.candidate_config = \"\" self.config_session_name = \"\" def", "ScrapliCfgDiff: scrapli cfg diff object Raises: N/A \"\"\" self.logger.debug(\"normalizing source and candidate configs", "present - if its present it will drop scrapli out # of the", "to be registered on the device Raises: N/A \"\"\" config, eager_config = self._prepare_config_payloads(config=config)", "ScrapliCfgResponse CONFIG_SOURCES = [ \"running\", \"startup\", ] class ScrapliCfgEOSBase: conn: Union[NetworkDriver, AsyncNetworkDriver] logger:", "we know there is no current config session Args: N/A Returns: None Raises:", "_pre_clear_config_sessions(self) -> ScrapliCfgResponse: \"\"\" Handle pre \"clear_config_sessions\" operations for parity between sync and", "ScrapliCfgResponse: new response object to update w/ get results Raises: N/A \"\"\" self.logger.info(\"clear_config_sessions", "cfg diff object Raises: N/A \"\"\" self.logger.debug(\"normalizing source and candidate configs for diff", "List[str] config_session_name: str candidate_config: str @staticmethod def _parse_version(device_output: str) -> str: \"\"\" Parse", "'{self.config_session_name}'\") register_config_session = True return config, eager_config, register_config_session def _reset_config_session(self) -> None: \"\"\"", "-> List[str]: \"\"\" Parse config session names out of device output Args: device_output:", "Handle post \"clear_config_sessions\" operations for parity between sync and async Args: response: response", "the `result` attribute Raises: N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses) if response.failed: msg = \"failed to", "import ScrapliCfgResponse CONFIG_SOURCES = [ \"running\", \"startup\", ] class ScrapliCfgEOSBase: conn: Union[NetworkDriver, AsyncNetworkDriver]", "config lines Raises: N/A \"\"\" # remove comment lines config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\",", "via scrapli Args: config: configuration to prep 
Returns: tuple: tuple of \"normal\" config", "config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config) # remove \"end\" at the end of config", "for parity between sync and async Args: N/A Returns: ScrapliCfgResponse: new response object", "\"\"\" Reset config session info Resets the candidate config and config session name", "[ \"running\", \"startup\", ] class ScrapliCfgEOSBase: conn: Union[NetworkDriver, AsyncNetworkDriver] logger: LoggerAdapter config_sources: List[str]", "results Raises: N/A \"\"\" self.logger.info(\"clear_config_sessions requested\") response = ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException ) return", "and async Args: N/A Returns: ScrapliCfgResponse: new response object to update w/ get", "like banners/macro that require scrapli \"eager=True\"), and lastly a bool indicating if the", "candidate config to load Returns: tuple: tuple containing \"normal\" config elements to send", "Returns: None Raises: N/A \"\"\" self.logger.debug(\"resetting candidate config and config session name\") self.candidate_config", "\"\\n\".join(line for line in source_config.splitlines() if line) candidate_config = re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\"", "w/ get results Raises: N/A \"\"\" self.logger.info(\"clear_config_sessions requested\") response = ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException", "-> str: \"\"\" Return command to use to get config based on the", "update w/ get results Raises: N/A \"\"\" self.logger.info(\"clear_config_sessions requested\") response = ScrapliCfgResponse( host=self.conn.host,", ") -> ScrapliCfgResponse: \"\"\" Handle post \"clear_config_sessions\" operations for parity between sync and", "operations for parity between sync and async Args: N/A Returns: ScrapliCfgResponse: new response", "N/A Returns: ScrapliCfgResponse: new response object to update w/ get results Raises: N/A", 
"level) comments though... user comments # attached to interfaces and the stuff will", "lines - we only remove \"global\" (top level) comments though... user comments #", "elements to send to the device and \"eager\" mode config elements to send", "in eager_config: config = config.replace(eager_section, \"!\") joined_eager_config = \"\\n\".join(captured_section for captured_section in eager_config)", "\"global\" (top level) comments though... user comments # attached to interfaces and the", "Prepare a configuration so it can be nicely sent to the device via", "will remain source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\") source_config = \"\\n\".join(line for line in", "import Iterable, List, Tuple, Union from scrapli.driver import AsyncNetworkDriver, NetworkDriver from scrapli.response import", "def _get_config_command(source: str) -> str: \"\"\" Return command to use to get config", "config, eager_config = self._prepare_config_payloads(config=config) register_config_session = False if not self.config_session_name: self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\"", "str @staticmethod def _parse_version(device_output: str) -> str: \"\"\" Parse version string out of", "N/A \"\"\" # remove comment lines config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config) # remove", "ScrapliCfgEOSBase: conn: Union[NetworkDriver, AsyncNetworkDriver] logger: LoggerAdapter config_sources: List[str] config_session_name: str candidate_config: str @staticmethod", "to the device (things like banners/macro that require scrapli \"eager=True\"), and lastly a", "= list(config_session_dict.get(\"sessions\", {})) return sessions @staticmethod def _get_config_command(source: str) -> str: \"\"\" Return", "get config based on the provided source Args: source: name of the config", "requested\") response = ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException ) return 
response def _post_clear_config_sessions( self, response:", "str: command to use to fetch the requested config Raises: N/A \"\"\" if", "Args: response: response object to update scrapli_responses: list of scrapli response objects from", "of the source config store Returns: ScrapliCfgDiff: scrapli cfg diff object Raises: N/A", "re.findall(pattern=BANNER_PATTERN, string=config) for eager_section in eager_config: config = config.replace(eager_section, \"!\") joined_eager_config = \"\\n\".join(captured_section", "Raises: N/A \"\"\" self.logger.debug(\"resetting candidate config and config session name\") self.candidate_config = \"\"", "config = re.sub(pattern=END_PATTERN, repl=\"!\", string=config) # find all sections that need to be", "Union[NetworkDriver, AsyncNetworkDriver] logger: LoggerAdapter config_sources: List[str] config_session_name: str candidate_config: str @staticmethod def _parse_version(device_output:", "Args: source_config: current config of the source config store Returns: ScrapliCfgDiff: scrapli cfg", "source and candidate configs -- this is only done # here pre-diff, so", "the device Raises: N/A \"\"\" config, eager_config = self._prepare_config_payloads(config=config) register_config_session = False if", "-- when these are \"empty\" we know there is no current config session", "running-config\" return \"show startup-config\" @staticmethod def _prepare_config_payloads(config: str) -> Tuple[str, str]: \"\"\" Prepare", "config and config session name attributes -- when these are \"empty\" we know", "out # of the config session which we do not want config =", "will be '{self.config_session_name}'\") register_config_session = True return config, eager_config, register_config_session def _reset_config_session(self) ->", "output Args: device_output: output from show version command Returns: str: device version string", "str, bool]: \"\"\" Prepare the normal and eager payloads and decide if we", "session names Raises: N/A \"\"\" try: config_session_dict = 
json.loads(device_output) except json.JSONDecodeError: return []", "will drop scrapli out # of the config session which we do not", "remain source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\") source_config = \"\\n\".join(line for line in source_config.splitlines()", "the device (things like banners/macro that require scrapli \"eager=True\"), and lastly a bool", "= config.replace(eager_section, \"!\") joined_eager_config = \"\\n\".join(captured_section for captured_section in eager_config) return config, joined_eager_config", "and \"eager\" config lines Raises: N/A \"\"\" # remove comment lines config =", "Tuple[str, str]: \"\"\" Normalize candidate config and source config so that we can", "config session name attributes -- when these are \"empty\" we know there is", "banners/macro that require scrapli \"eager=True\"), and lastly a bool indicating if the config", "\"\"\" response.record_response(scrapli_responses=scrapli_responses) if response.failed: msg = \"failed to clear device configuration session(s)\" self.logger.critical(msg)", "self.logger.debug(f\"configuration session name will be '{self.config_session_name}'\") register_config_session = True return config, eager_config, register_config_session", "Args: N/A Returns: ScrapliCfgResponse: new response object to update w/ get results Raises:", "= ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException ) return response def _post_clear_config_sessions( self, response: ScrapliCfgResponse, scrapli_responses:", "_parse_config_sessions(device_output: str) -> List[str]: \"\"\" Parse config session names out of device output", "@staticmethod def _get_config_command(source: str) -> str: \"\"\" Return command to use to get", "output from show version command Returns: str: device version string Raises: N/A \"\"\"", "source == \"running\": return \"show running-config\" return \"show startup-config\" @staticmethod def 
_prepare_config_payloads(config: str)", "if its present it will drop scrapli out # of the config session", "sent eager_config = re.findall(pattern=BANNER_PATTERN, string=config) for eager_section in eager_config: config = config.replace(eager_section, \"!\")", "str) -> Tuple[str, str, bool]: \"\"\" Prepare the normal and eager payloads and", "config session Args: config: candidate config to load Returns: tuple: tuple containing \"normal\"", "if not self.config_session_name: self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session name will be '{self.config_session_name}'\") register_config_session", "device (things like banners/macro that require scrapli \"eager=True\"), and lastly a bool indicating", "from both the source and candidate configs -- this is only done #", "parity between sync and async Args: response: response object to update scrapli_responses: list", "config session needs to be registered on the device Raises: N/A \"\"\" config,", "comment lines - we only remove \"global\" (top level) comments though... 
user comments", "Returns: str: command to use to fetch the requested config Raises: N/A \"\"\"", "on the device Raises: N/A \"\"\" config, eager_config = self._prepare_config_payloads(config=config) register_config_session = False", "the source config store Returns: ScrapliCfgDiff: scrapli cfg diff object Raises: N/A \"\"\"", "done # here pre-diff, so we dont modify the user provided candidate config", "candidate_config.splitlines() if line) return source_config, candidate_config def _pre_clear_config_sessions(self) -> ScrapliCfgResponse: \"\"\" Handle pre", "string=config) # find all sections that need to be \"eagerly\" sent eager_config =", "\"\\n\".join(captured_section for captured_section in eager_config) return config, joined_eager_config def _prepare_load_config_session_and_payload(self, config: str) ->", "payloads and decide if we need to register a config session Args: config:", "# remove \"end\" at the end of config if present - if its", "config which can totally have # those comment lines - we only remove", "config session Args: N/A Returns: None Raises: N/A \"\"\" self.logger.debug(\"resetting candidate config and", "sent to the device via scrapli Args: config: configuration to prep Returns: tuple:", "\"\\n\".join(line for line in candidate_config.splitlines() if line) return source_config, candidate_config def _pre_clear_config_sessions(self) ->", "output Args: device_output: output from show version command Returns: list[str]: config session names", "candidate config and source config so that we can easily diff them Args:", "requested config Raises: N/A \"\"\" if source == \"running\": return \"show running-config\" return", "session needs to be registered on the device Raises: N/A \"\"\" config, eager_config", "these are \"empty\" we know there is no current config session Args: N/A", "from scrapli_cfg.platform.core.arista_eos.patterns import ( BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, ) from scrapli_cfg.response 
import ScrapliCfgResponse", "device via scrapli Args: config: configuration to prep Returns: tuple: tuple of \"normal\"", "@staticmethod def _parse_version(device_output: str) -> str: \"\"\" Parse version string out of device", "the end of config if present - if its present it will drop", "@staticmethod def _prepare_config_payloads(config: str) -> Tuple[str, str]: \"\"\" Prepare a configuration so it", "attributes -- when these are \"empty\" we know there is no current config", "config so that we can easily diff them Args: source_config: current config of", ") return response def _post_clear_config_sessions( self, response: ScrapliCfgResponse, scrapli_responses: Iterable[Response], ) -> ScrapliCfgResponse:", "configs for diff object\") # Remove all comment lines from both the source", "str]: \"\"\" Normalize candidate config and source config so that we can easily", "return response def _post_clear_config_sessions( self, response: ScrapliCfgResponse, scrapli_responses: Iterable[Response], ) -> ScrapliCfgResponse: \"\"\"", "a config session Args: config: candidate config to load Returns: tuple: tuple containing", "return source_config, candidate_config def _pre_clear_config_sessions(self) -> ScrapliCfgResponse: \"\"\" Handle pre \"clear_config_sessions\" operations for", "LoggerAdapter from typing import Iterable, List, Tuple, Union from scrapli.driver import AsyncNetworkDriver, NetworkDriver", "version_string = version_string_search.group(0) or \"\" return version_string @staticmethod def _parse_config_sessions(device_output: str) -> List[str]:", "to prep Returns: tuple: tuple of \"normal\" config lines and \"eager\" config lines", "eager_config = self._prepare_config_payloads(config=config) register_config_session = False if not self.config_session_name: self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration", "there is no current config session Args: N/A Returns: None Raises: N/A \"\"\"", "str 
candidate_config: str @staticmethod def _parse_version(device_output: str) -> str: \"\"\" Parse version string", "import AsyncNetworkDriver, NetworkDriver from scrapli.response import Response from scrapli_cfg.exceptions import ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns", "config to load Returns: tuple: tuple containing \"normal\" config elements to send to", "session names out of device output Args: device_output: output from show version command", "{})) return sessions @staticmethod def _get_config_command(source: str) -> str: \"\"\" Return command to", "running|startup Returns: str: command to use to fetch the requested config Raises: N/A", "Raises: N/A \"\"\" config, eager_config = self._prepare_config_payloads(config=config) register_config_session = False if not self.config_session_name:", "f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session name will be '{self.config_session_name}'\") register_config_session = True return config, eager_config,", "AsyncNetworkDriver] logger: LoggerAdapter config_sources: List[str] config_session_name: str candidate_config: str @staticmethod def _parse_version(device_output: str)", "\"end\" at the end of config if present - if its present it", "= re.sub(pattern=END_PATTERN, repl=\"!\", string=config) # find all sections that need to be \"eagerly\"", "N/A \"\"\" try: config_session_dict = json.loads(device_output) except json.JSONDecodeError: return [] sessions = list(config_session_dict.get(\"sessions\",", "clear device configuration session(s)\" self.logger.critical(msg) response.result = msg else: response.result = \"configuration session(s)", "datetime import datetime from logging import LoggerAdapter from typing import Iterable, List, Tuple,", "from scrapli_cfg.response import ScrapliCfgResponse CONFIG_SOURCES = [ \"running\", \"startup\", ] class ScrapliCfgEOSBase: conn:", "to use to fetch the requested config Raises: N/A \"\"\" if source ==", 
"\"eagerly\" sent eager_config = re.findall(pattern=BANNER_PATTERN, string=config) for eager_section in eager_config: config = config.replace(eager_section,", "device version string Raises: N/A \"\"\" version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output) if not version_string_search:", "candidate config and config session name attributes -- when these are \"empty\" we", ") from scrapli_cfg.response import ScrapliCfgResponse CONFIG_SOURCES = [ \"running\", \"startup\", ] class ScrapliCfgEOSBase:", "update scrapli_responses: list of scrapli response objects from fetching the version Returns: ScrapliCfgResponse:", "Args: config: configuration to prep Returns: tuple: tuple of \"normal\" config lines and", "Resets the candidate config and config session name attributes -- when these are", "re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\" ) candidate_config = \"\\n\".join(line for line in candidate_config.splitlines() if", "device output Args: device_output: output from show version command Returns: list[str]: config session", "self.logger.info(\"clear_config_sessions requested\") response = ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException ) return response def _post_clear_config_sessions( self,", "for captured_section in eager_config) return config, joined_eager_config def _prepare_load_config_session_and_payload(self, config: str) -> Tuple[str,", "Args: source: name of the config source, generally running|startup Returns: str: command to", "class ScrapliCfgEOSBase: conn: Union[NetworkDriver, AsyncNetworkDriver] logger: LoggerAdapter config_sources: List[str] config_session_name: str candidate_config: str", "both the source and candidate configs -- this is only done # here", "-> ScrapliCfgResponse: \"\"\" Handle pre \"clear_config_sessions\" operations for parity between sync and async", "of device output Args: device_output: output from show version command Returns: 
list[str]: config", "str) -> List[str]: \"\"\" Parse config session names out of device output Args:", "of the config session which we do not want config = re.sub(pattern=END_PATTERN, repl=\"!\",", "which can totally have # those comment lines - we only remove \"global\"", "scrapli \"eager=True\"), and lastly a bool indicating if the config session needs to", "GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, ) from scrapli_cfg.response import ScrapliCfgResponse CONFIG_SOURCES = [ \"running\", \"startup\", ]", "= [ \"running\", \"startup\", ] class ScrapliCfgEOSBase: conn: Union[NetworkDriver, AsyncNetworkDriver] logger: LoggerAdapter config_sources:", "\"running\": return \"show running-config\" return \"show startup-config\" @staticmethod def _prepare_config_payloads(config: str) -> Tuple[str,", "though... user comments # attached to interfaces and the stuff will remain source_config", "device_output: output from show version command Returns: list[str]: config session names Raises: N/A", "= self._prepare_config_payloads(config=config) register_config_session = False if not self.config_session_name: self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session", "\"\"\" # remove comment lines config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config) # remove \"end\"", "the device and \"eager\" mode config elements to send to the device (things", "register_config_session = True return config, eager_config, register_config_session def _reset_config_session(self) -> None: \"\"\" Reset", "[] sessions = list(config_session_dict.get(\"sessions\", {})) return sessions @staticmethod def _get_config_command(source: str) -> str:", "config: str) -> Tuple[str, str, bool]: \"\"\" Prepare the normal and eager payloads", "config session info Resets the candidate config and config session name attributes --", "= version_string_search.group(0) or \"\" return version_string @staticmethod def 
_parse_config_sessions(device_output: str) -> List[str]: \"\"\"", "version_string @staticmethod def _parse_config_sessions(device_output: str) -> List[str]: \"\"\" Parse config session names out", "config_session_dict = json.loads(device_output) except json.JSONDecodeError: return [] sessions = list(config_session_dict.get(\"sessions\", {})) return sessions", "True return config, eager_config, register_config_session def _reset_config_session(self) -> None: \"\"\" Reset config session", "list of scrapli response objects from fetching the version Returns: ScrapliCfgResponse: response object", "to use to get config based on the provided source Args: source: name", "line in source_config.splitlines() if line) candidate_config = re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\" ) candidate_config", "_get_config_command(source: str) -> str: \"\"\" Return command to use to get config based", "config: configuration to prep Returns: tuple: tuple of \"normal\" config lines and \"eager\"", "when these are \"empty\" we know there is no current config session Args:", "return \"\" version_string = version_string_search.group(0) or \"\" return version_string @staticmethod def _parse_config_sessions(device_output: str)", "the source and candidate configs -- this is only done # here pre-diff,", "from logging import LoggerAdapter from typing import Iterable, List, Tuple, Union from scrapli.driver", "lines from both the source and candidate configs -- this is only done", "scrapli_cfg.platform.core.arista_eos.patterns import ( BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, ) from scrapli_cfg.response import ScrapliCfgResponse CONFIG_SOURCES", "device configuration session(s)\" self.logger.critical(msg) response.result = msg else: response.result = \"configuration session(s) cleared\"", "eager_config = re.findall(pattern=BANNER_PATTERN, string=config) for eager_section in eager_config: config = 
config.replace(eager_section, \"!\") joined_eager_config", "eager_section in eager_config: config = config.replace(eager_section, \"!\") joined_eager_config = \"\\n\".join(captured_section for captured_section in", "attached to interfaces and the stuff will remain source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\")", "\"eager\" config lines Raises: N/A \"\"\" # remove comment lines config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN,", "here pre-diff, so we dont modify the user provided candidate config which can", "and the stuff will remain source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\") source_config = \"\\n\".join(line", "scrapli.driver import AsyncNetworkDriver, NetworkDriver from scrapli.response import Response from scrapli_cfg.exceptions import ScrapliCfgException from", "candidate_config def _pre_clear_config_sessions(self) -> ScrapliCfgResponse: \"\"\" Handle pre \"clear_config_sessions\" operations for parity between", "that we can easily diff them Args: source_config: current config of the source", "sync and async Args: N/A Returns: ScrapliCfgResponse: new response object to update w/", "in candidate_config.splitlines() if line) return source_config, candidate_config def _pre_clear_config_sessions(self) -> ScrapliCfgResponse: \"\"\" Handle", "source config so that we can easily diff them Args: source_config: current config", "Returns: ScrapliCfgResponse: response object containing string of the version as the `result` attribute", "candidate_config = \"\\n\".join(line for line in candidate_config.splitlines() if line) return source_config, candidate_config def", "sections that need to be \"eagerly\" sent eager_config = re.findall(pattern=BANNER_PATTERN, string=config) for eager_section", "if not version_string_search: return \"\" version_string = version_string_search.group(0) or \"\" return version_string @staticmethod", "user comments # attached to interfaces and the 
stuff will remain source_config =", "ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns import ( BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, ) from scrapli_cfg.response import", "at the end of config if present - if its present it will", "the config source, generally running|startup Returns: str: command to use to fetch the", "string Raises: N/A \"\"\" version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output) if not version_string_search: return \"\"", "and eager payloads and decide if we need to register a config session", "object containing string of the version as the `result` attribute Raises: N/A \"\"\"", "\"failed to clear device configuration session(s)\" self.logger.critical(msg) response.result = msg else: response.result =", "response def _post_clear_config_sessions( self, response: ScrapliCfgResponse, scrapli_responses: Iterable[Response], ) -> ScrapliCfgResponse: \"\"\" Handle", "= re.findall(pattern=BANNER_PATTERN, string=config) for eager_section in eager_config: config = config.replace(eager_section, \"!\") joined_eager_config =", "command Returns: list[str]: config session names Raises: N/A \"\"\" try: config_session_dict = json.loads(device_output)", "N/A \"\"\" if source == \"running\": return \"show running-config\" return \"show startup-config\" @staticmethod", "a configuration so it can be nicely sent to the device via scrapli", "end of config if present - if its present it will drop scrapli", "registered on the device Raises: N/A \"\"\" config, eager_config = self._prepare_config_payloads(config=config) register_config_session =", "containing string of the version as the `result` attribute Raises: N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses)", "\"\"\" Parse version string out of device output Args: device_output: output from show", "device Raises: N/A \"\"\" config, eager_config = self._prepare_config_payloads(config=config) 
register_config_session = False if not", "conn: Union[NetworkDriver, AsyncNetworkDriver] logger: LoggerAdapter config_sources: List[str] config_session_name: str candidate_config: str @staticmethod def", "that need to be \"eagerly\" sent eager_config = re.findall(pattern=BANNER_PATTERN, string=config) for eager_section in", "= \"\" def _normalize_source_candidate_configs(self, source_config: str) -> Tuple[str, str]: \"\"\" Normalize candidate config", "version command Returns: str: device version string Raises: N/A \"\"\" version_string_search = re.search(pattern=VERSION_PATTERN,", "we need to register a config session Args: config: candidate config to load", "session(s)\" self.logger.critical(msg) response.result = msg else: response.result = \"configuration session(s) cleared\" return response", "config lines and \"eager\" config lines Raises: N/A \"\"\" # remove comment lines", "do not want config = re.sub(pattern=END_PATTERN, repl=\"!\", string=config) # find all sections that", "\"running\", \"startup\", ] class ScrapliCfgEOSBase: conn: Union[NetworkDriver, AsyncNetworkDriver] logger: LoggerAdapter config_sources: List[str] config_session_name:", "\"clear_config_sessions\" operations for parity between sync and async Args: N/A Returns: ScrapliCfgResponse: new", "source, generally running|startup Returns: str: command to use to fetch the requested config", "except json.JSONDecodeError: return [] sessions = list(config_session_dict.get(\"sessions\", {})) return sessions @staticmethod def _get_config_command(source:", "the version as the `result` attribute Raises: N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses) if response.failed: msg", "diff them Args: source_config: current config of the source config store Returns: ScrapliCfgDiff:", "\"\"\" if source == \"running\": return \"show running-config\" return \"show startup-config\" @staticmethod def", "N/A \"\"\" self.logger.debug(\"resetting candidate config and config session name\") 
self.candidate_config = \"\" self.config_session_name", "] class ScrapliCfgEOSBase: conn: Union[NetworkDriver, AsyncNetworkDriver] logger: LoggerAdapter config_sources: List[str] config_session_name: str candidate_config:", "user provided candidate config which can totally have # those comment lines -", "Normalize candidate config and source config so that we can easily diff them", "to be \"eagerly\" sent eager_config = re.findall(pattern=BANNER_PATTERN, string=config) for eager_section in eager_config: config", "sessions = list(config_session_dict.get(\"sessions\", {})) return sessions @staticmethod def _get_config_command(source: str) -> str: \"\"\"", "= \"\\n\".join(line for line in candidate_config.splitlines() if line) return source_config, candidate_config def _pre_clear_config_sessions(self)", "use to fetch the requested config Raises: N/A \"\"\" if source == \"running\":", "Remove all comment lines from both the source and candidate configs -- this", "nicely sent to the device via scrapli Args: config: configuration to prep Returns:", "False if not self.config_session_name: self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session name will be '{self.config_session_name}'\")", "to update w/ get results Raises: N/A \"\"\" self.logger.info(\"clear_config_sessions requested\") response = ScrapliCfgResponse(", "configuration to prep Returns: tuple: tuple of \"normal\" config lines and \"eager\" config", "be \"eagerly\" sent eager_config = re.findall(pattern=BANNER_PATTERN, string=config) for eager_section in eager_config: config =", "source: name of the config source, generally running|startup Returns: str: command to use", "= \"\\n\".join(captured_section for captured_section in eager_config) return config, joined_eager_config def _prepare_load_config_session_and_payload(self, config: str)", "and decide if we need to register a config session Args: config: candidate", "version string out of 
device output Args: device_output: output from show version command", "ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException ) return response def _post_clear_config_sessions( self, response: ScrapliCfgResponse, scrapli_responses: Iterable[Response],", "load Returns: tuple: tuple containing \"normal\" config elements to send to the device", "to the device and \"eager\" mode config elements to send to the device", "response: ScrapliCfgResponse, scrapli_responses: Iterable[Response], ) -> ScrapliCfgResponse: \"\"\" Handle post \"clear_config_sessions\" operations for", "string of the version as the `result` attribute Raises: N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses) if", "config_session_name: str candidate_config: str @staticmethod def _parse_version(device_output: str) -> str: \"\"\" Parse version", "re.search(pattern=VERSION_PATTERN, string=device_output) if not version_string_search: return \"\" version_string = version_string_search.group(0) or \"\" return", "use to get config based on the provided source Args: source: name of", "List[str]: \"\"\" Parse config session names out of device output Args: device_output: output", "need to be \"eagerly\" sent eager_config = re.findall(pattern=BANNER_PATTERN, string=config) for eager_section in eager_config:", "re from datetime import datetime from logging import LoggerAdapter from typing import Iterable,", "current config of the source config store Returns: ScrapliCfgDiff: scrapli cfg diff object", "Returns: tuple: tuple containing \"normal\" config elements to send to the device and", "candidate configs for diff object\") # Remove all comment lines from both the", "the version Returns: ScrapliCfgResponse: response object containing string of the version as the", "of \"normal\" config lines and \"eager\" config lines Raises: N/A \"\"\" # remove", "version Returns: ScrapliCfgResponse: response object containing string of the version as the `result`", "of 
device output Args: device_output: output from show version command Returns: str: device", "CONFIG_SOURCES = [ \"running\", \"startup\", ] class ScrapliCfgEOSBase: conn: Union[NetworkDriver, AsyncNetworkDriver] logger: LoggerAdapter", "return \"show running-config\" return \"show startup-config\" @staticmethod def _prepare_config_payloads(config: str) -> Tuple[str, str]:", "datetime from logging import LoggerAdapter from typing import Iterable, List, Tuple, Union from", "\"\"\" self.logger.info(\"clear_config_sessions requested\") response = ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException ) return response def _post_clear_config_sessions(", "\"show startup-config\" @staticmethod def _prepare_config_payloads(config: str) -> Tuple[str, str]: \"\"\" Prepare a configuration", "drop scrapli out # of the config session which we do not want", "candidate_config: str @staticmethod def _parse_version(device_output: str) -> str: \"\"\" Parse version string out", "session info Resets the candidate config and config session name attributes -- when", "LoggerAdapter config_sources: List[str] config_session_name: str candidate_config: str @staticmethod def _parse_version(device_output: str) -> str:", "source_config: str) -> Tuple[str, str]: \"\"\" Normalize candidate config and source config so", "str]: \"\"\" Prepare a configuration so it can be nicely sent to the", "\"\"\" Normalize candidate config and source config so that we can easily diff", "None Raises: N/A \"\"\" self.logger.debug(\"resetting candidate config and config session name\") self.candidate_config =", "scrapli_cfg.exceptions import ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns import ( BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, ) from", "== \"running\": return \"show running-config\" return \"show startup-config\" @staticmethod def _prepare_config_payloads(config: str) ->", "config and source config so that we can 
easily diff them Args: source_config:", "-> ScrapliCfgResponse: \"\"\" Handle post \"clear_config_sessions\" operations for parity between sync and async", "@staticmethod def _parse_config_sessions(device_output: str) -> List[str]: \"\"\" Parse config session names out of", "= \"\\n\".join(line for line in source_config.splitlines() if line) candidate_config = re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config,", "scrapli Args: config: configuration to prep Returns: tuple: tuple of \"normal\" config lines", "for line in source_config.splitlines() if line) candidate_config = re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\" )", "str) -> Tuple[str, str]: \"\"\" Prepare a configuration so it can be nicely", "-> None: \"\"\" Reset config session info Resets the candidate config and config", "N/A Returns: None Raises: N/A \"\"\" self.logger.debug(\"resetting candidate config and config session name\")", "response object to update w/ get results Raises: N/A \"\"\" self.logger.info(\"clear_config_sessions requested\") response", "to send to the device (things like banners/macro that require scrapli \"eager=True\"), and", "require scrapli \"eager=True\"), and lastly a bool indicating if the config session needs", "names out of device output Args: device_output: output from show version command Returns:", "know there is no current config session Args: N/A Returns: None Raises: N/A", "ScrapliCfgResponse: response object containing string of the version as the `result` attribute Raises:", "BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, ) from scrapli_cfg.response import ScrapliCfgResponse CONFIG_SOURCES = [ \"running\",", "the user provided candidate config which can totally have # those comment lines", "scrapli_cfg.response import ScrapliCfgResponse CONFIG_SOURCES = [ \"running\", \"startup\", ] class ScrapliCfgEOSBase: conn: Union[NetworkDriver,", "which we do not want config = 
re.sub(pattern=END_PATTERN, repl=\"!\", string=config) # find all", "not version_string_search: return \"\" version_string = version_string_search.group(0) or \"\" return version_string @staticmethod def", "for diff object\") # Remove all comment lines from both the source and", "def _reset_config_session(self) -> None: \"\"\" Reset config session info Resets the candidate config", "be registered on the device Raises: N/A \"\"\" config, eager_config = self._prepare_config_payloads(config=config) register_config_session", "-- this is only done # here pre-diff, so we dont modify the", "str) -> Tuple[str, str]: \"\"\" Normalize candidate config and source config so that", "to load Returns: tuple: tuple containing \"normal\" config elements to send to the", "the requested config Raises: N/A \"\"\" if source == \"running\": return \"show running-config\"", "post \"clear_config_sessions\" operations for parity between sync and async Args: response: response object", "return config, eager_config, register_config_session def _reset_config_session(self) -> None: \"\"\" Reset config session info", "_prepare_load_config_session_and_payload(self, config: str) -> Tuple[str, str, bool]: \"\"\" Prepare the normal and eager", "the config session needs to be registered on the device Raises: N/A \"\"\"", ") candidate_config = \"\\n\".join(line for line in candidate_config.splitlines() if line) return source_config, candidate_config", "from fetching the version Returns: ScrapliCfgResponse: response object containing string of the version", "= re.search(pattern=VERSION_PATTERN, string=device_output) if not version_string_search: return \"\" version_string = version_string_search.group(0) or \"\"", "version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output) if not version_string_search: return \"\" version_string = version_string_search.group(0) or", "store Returns: ScrapliCfgDiff: scrapli cfg diff object Raises: N/A \"\"\" self.logger.debug(\"normalizing source 
and", "ScrapliCfgResponse, scrapli_responses: Iterable[Response], ) -> ScrapliCfgResponse: \"\"\" Handle post \"clear_config_sessions\" operations for parity", "= f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session name will be '{self.config_session_name}'\") register_config_session = True return config,", "string=self.candidate_config, repl=\"\" ) candidate_config = \"\\n\".join(line for line in candidate_config.splitlines() if line) return", "AsyncNetworkDriver, NetworkDriver from scrapli.response import Response from scrapli_cfg.exceptions import ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns import", "show version command Returns: list[str]: config session names Raises: N/A \"\"\" try: config_session_dict", "tuple of \"normal\" config lines and \"eager\" config lines Raises: N/A \"\"\" #", "new response object to update w/ get results Raises: N/A \"\"\" self.logger.info(\"clear_config_sessions requested\")", "version as the `result` attribute Raises: N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses) if response.failed: msg =", "string=device_output) if not version_string_search: return \"\" version_string = version_string_search.group(0) or \"\" return version_string", "# remove comment lines config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config) # remove \"end\" at", "eager payloads and decide if we need to register a config session Args:", "list[str]: config session names Raises: N/A \"\"\" try: config_session_dict = json.loads(device_output) except json.JSONDecodeError:", "eager_config, register_config_session def _reset_config_session(self) -> None: \"\"\" Reset config session info Resets the", "command Returns: str: device version string Raises: N/A \"\"\" version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output)", "- if its present it will drop scrapli out # of the config", "line) candidate_config = re.sub( 
pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\" ) candidate_config = \"\\n\".join(line for line", "-> Tuple[str, str]: \"\"\" Normalize candidate config and source config so that we", "comments # attached to interfaces and the stuff will remain source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN,", "NetworkDriver from scrapli.response import Response from scrapli_cfg.exceptions import ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns import (", "\"\"\" Parse config session names out of device output Args: device_output: output from", "source_config: current config of the source config store Returns: ScrapliCfgDiff: scrapli cfg diff", "Returns: list[str]: config session names Raises: N/A \"\"\" try: config_session_dict = json.loads(device_output) except", "source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\") source_config = \"\\n\".join(line for line in source_config.splitlines() if", "\"\"\" Return command to use to get config based on the provided source", "config session names out of device output Args: device_output: output from show version", "device output Args: device_output: output from show version command Returns: str: device version", "and \"eager\" mode config elements to send to the device (things like banners/macro", "\"\" def _normalize_source_candidate_configs(self, source_config: str) -> Tuple[str, str]: \"\"\" Normalize candidate config and", "re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config) # remove \"end\" at the end of config if present", "to register a config session Args: config: candidate config to load Returns: tuple:", "the stuff will remain source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\") source_config = \"\\n\".join(line for", "session Args: N/A Returns: None Raises: N/A \"\"\" self.logger.debug(\"resetting candidate config and config", "typing import Iterable, List, Tuple, 
Union from scrapli.driver import AsyncNetworkDriver, NetworkDriver from scrapli.response", "= \"\" self.config_session_name = \"\" def _normalize_source_candidate_configs(self, source_config: str) -> Tuple[str, str]: \"\"\"", "Returns: ScrapliCfgDiff: scrapli cfg diff object Raises: N/A \"\"\" self.logger.debug(\"normalizing source and candidate", "= json.loads(device_output) except json.JSONDecodeError: return [] sessions = list(config_session_dict.get(\"sessions\", {})) return sessions @staticmethod", "to update scrapli_responses: list of scrapli response objects from fetching the version Returns:", "return config, joined_eager_config def _prepare_load_config_session_and_payload(self, config: str) -> Tuple[str, str, bool]: \"\"\" Prepare", "= \"failed to clear device configuration session(s)\" self.logger.critical(msg) response.result = msg else: response.result", "config: candidate config to load Returns: tuple: tuple containing \"normal\" config elements to", "_parse_version(device_output: str) -> str: \"\"\" Parse version string out of device output Args:", "# Remove all comment lines from both the source and candidate configs --", "response = ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException ) return response def _post_clear_config_sessions( self, response: ScrapliCfgResponse,", "\"\"\" Prepare a configuration so it can be nicely sent to the device", "config and config session name\") self.candidate_config = \"\" self.config_session_name = \"\" def _normalize_source_candidate_configs(self,", "config session names Raises: N/A \"\"\" try: config_session_dict = json.loads(device_output) except json.JSONDecodeError: return", "pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\" ) candidate_config = \"\\n\".join(line for line in candidate_config.splitlines() if line)", "and async Args: response: response object to update scrapli_responses: list of scrapli response", "version_string_search: return \"\" 
version_string = version_string_search.group(0) or \"\" return version_string @staticmethod def _parse_config_sessions(device_output:", "return [] sessions = list(config_session_dict.get(\"sessions\", {})) return sessions @staticmethod def _get_config_command(source: str) ->", "re.sub(pattern=END_PATTERN, repl=\"!\", string=config) # find all sections that need to be \"eagerly\" sent", "we can easily diff them Args: source_config: current config of the source config", "lines Raises: N/A \"\"\" # remove comment lines config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config)", "its present it will drop scrapli out # of the config session which", "VERSION_PATTERN, ) from scrapli_cfg.response import ScrapliCfgResponse CONFIG_SOURCES = [ \"running\", \"startup\", ] class", "`result` attribute Raises: N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses) if response.failed: msg = \"failed to clear", "only done # here pre-diff, so we dont modify the user provided candidate", "needs to be registered on the device Raises: N/A \"\"\" config, eager_config =", "repl=\"\" ) candidate_config = \"\\n\".join(line for line in candidate_config.splitlines() if line) return source_config,", "source_config, candidate_config def _pre_clear_config_sessions(self) -> ScrapliCfgResponse: \"\"\" Handle pre \"clear_config_sessions\" operations for parity", "want config = re.sub(pattern=END_PATTERN, repl=\"!\", string=config) # find all sections that need to", "is only done # here pre-diff, so we dont modify the user provided", "N/A \"\"\" config, eager_config = self._prepare_config_payloads(config=config) register_config_session = False if not self.config_session_name: self.config_session_name", "from scrapli.driver import AsyncNetworkDriver, NetworkDriver from scrapli.response import Response from scrapli_cfg.exceptions import ScrapliCfgException", "logging import LoggerAdapter from typing import Iterable, List, Tuple, Union from scrapli.driver 
import", "from datetime import datetime from logging import LoggerAdapter from typing import Iterable, List,", "\"empty\" we know there is no current config session Args: N/A Returns: None", "configuration so it can be nicely sent to the device via scrapli Args:", "List, Tuple, Union from scrapli.driver import AsyncNetworkDriver, NetworkDriver from scrapli.response import Response from", "normal and eager payloads and decide if we need to register a config", "register_config_session = False if not self.config_session_name: self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session name will", "Raises: N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses) if response.failed: msg = \"failed to clear device configuration", "async Args: response: response object to update scrapli_responses: list of scrapli response objects", "names Raises: N/A \"\"\" try: config_session_dict = json.loads(device_output) except json.JSONDecodeError: return [] sessions", "config Raises: N/A \"\"\" if source == \"running\": return \"show running-config\" return \"show", "Tuple, Union from scrapli.driver import AsyncNetworkDriver, NetworkDriver from scrapli.response import Response from scrapli_cfg.exceptions", "so that we can easily diff them Args: source_config: current config of the", "so we dont modify the user provided candidate config which can totally have", "def _prepare_config_payloads(config: str) -> Tuple[str, str]: \"\"\" Prepare a configuration so it can", "if response.failed: msg = \"failed to clear device configuration session(s)\" self.logger.critical(msg) response.result =", "current config session Args: N/A Returns: None Raises: N/A \"\"\" self.logger.debug(\"resetting candidate config", "session Args: config: candidate config to load Returns: tuple: tuple containing \"normal\" config", "and source config so that we can easily diff them Args: source_config: current", "Raises: N/A \"\"\" try: 
config_session_dict = json.loads(device_output) except json.JSONDecodeError: return [] sessions =", "N/A \"\"\" self.logger.info(\"clear_config_sessions requested\") response = ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException ) return response def", "str: \"\"\" Return command to use to get config based on the provided", "config.replace(eager_section, \"!\") joined_eager_config = \"\\n\".join(captured_section for captured_section in eager_config) return config, joined_eager_config def", "mode config elements to send to the device (things like banners/macro that require", "send to the device and \"eager\" mode config elements to send to the", "to get config based on the provided source Args: source: name of the", "captured_section in eager_config) return config, joined_eager_config def _prepare_load_config_session_and_payload(self, config: str) -> Tuple[str, str,", "remove comment lines config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, repl=\"!\", string=config) # remove \"end\" at the", "it will drop scrapli out # of the config session which we do", "config_sources: List[str] config_session_name: str candidate_config: str @staticmethod def _parse_version(device_output: str) -> str: \"\"\"", "fetch the requested config Raises: N/A \"\"\" if source == \"running\": return \"show", "a bool indicating if the config session needs to be registered on the", "from scrapli_cfg.exceptions import ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns import ( BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, )", "re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\") source_config = \"\\n\".join(line for line in source_config.splitlines() if line) candidate_config", "Parse config session names out of device output Args: device_output: output from show", "all sections that need to be \"eagerly\" sent eager_config = re.findall(pattern=BANNER_PATTERN, string=config) for", "or 
\"\" return version_string @staticmethod def _parse_config_sessions(device_output: str) -> List[str]: \"\"\" Parse config", "def _normalize_source_candidate_configs(self, source_config: str) -> Tuple[str, str]: \"\"\" Normalize candidate config and source", "Raises: N/A \"\"\" self.logger.info(\"clear_config_sessions requested\") response = ScrapliCfgResponse( host=self.conn.host, raise_for_status_exception=ScrapliCfgException ) return response", "send to the device (things like banners/macro that require scrapli \"eager=True\"), and lastly", "\"startup\", ] class ScrapliCfgEOSBase: conn: Union[NetworkDriver, AsyncNetworkDriver] logger: LoggerAdapter config_sources: List[str] config_session_name: str", "indicating if the config session needs to be registered on the device Raises:", "based on the provided source Args: source: name of the config source, generally", "Args: device_output: output from show version command Returns: list[str]: config session names Raises:", "source Args: source: name of the config source, generally running|startup Returns: str: command", "self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session name will be '{self.config_session_name}'\") register_config_session = True return", "if line) return source_config, candidate_config def _pre_clear_config_sessions(self) -> ScrapliCfgResponse: \"\"\" Handle pre \"clear_config_sessions\"", "async Args: N/A Returns: ScrapliCfgResponse: new response object to update w/ get results", "\"eager\" mode config elements to send to the device (things like banners/macro that", "repl=\"\") source_config = \"\\n\".join(line for line in source_config.splitlines() if line) candidate_config = re.sub(", "response object to update scrapli_responses: list of scrapli response objects from fetching the", "Raises: N/A \"\"\" version_string_search = re.search(pattern=VERSION_PATTERN, string=device_output) if not version_string_search: return \"\" 
version_string", "# here pre-diff, so we dont modify the user provided candidate config which", "joined_eager_config = \"\\n\".join(captured_section for captured_section in eager_config) return config, joined_eager_config def _prepare_load_config_session_and_payload(self, config:", "json import re from datetime import datetime from logging import LoggerAdapter from typing", "elements to send to the device (things like banners/macro that require scrapli \"eager=True\"),", "output from show version command Returns: list[str]: config session names Raises: N/A \"\"\"", "if we need to register a config session Args: config: candidate config to", "-> str: \"\"\" Parse version string out of device output Args: device_output: output", "for eager_section in eager_config: config = config.replace(eager_section, \"!\") joined_eager_config = \"\\n\".join(captured_section for captured_section", "only remove \"global\" (top level) comments though... user comments # attached to interfaces", "joined_eager_config def _prepare_load_config_session_and_payload(self, config: str) -> Tuple[str, str, bool]: \"\"\" Prepare the normal", "to clear device configuration session(s)\" self.logger.critical(msg) response.result = msg else: response.result = \"configuration", "string=source_config, repl=\"\") source_config = \"\\n\".join(line for line in source_config.splitlines() if line) candidate_config =", "bool]: \"\"\" Prepare the normal and eager payloads and decide if we need", "remove \"global\" (top level) comments though... 
user comments # attached to interfaces and", "sync and async Args: response: response object to update scrapli_responses: list of scrapli", "response objects from fetching the version Returns: ScrapliCfgResponse: response object containing string of", "str) -> str: \"\"\" Return command to use to get config based on", "object Raises: N/A \"\"\" self.logger.debug(\"normalizing source and candidate configs for diff object\") #", "to interfaces and the stuff will remain source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\") source_config", "def _parse_config_sessions(device_output: str) -> List[str]: \"\"\" Parse config session names out of device", "attribute Raises: N/A \"\"\" response.record_response(scrapli_responses=scrapli_responses) if response.failed: msg = \"failed to clear device", "stuff will remain source_config = re.sub(pattern=GLOBAL_COMMENT_LINE_PATTERN, string=source_config, repl=\"\") source_config = \"\\n\".join(line for line", "response object containing string of the version as the `result` attribute Raises: N/A", "Raises: N/A \"\"\" self.logger.debug(\"normalizing source and candidate configs for diff object\") # Remove", "no current config session Args: N/A Returns: None Raises: N/A \"\"\" self.logger.debug(\"resetting candidate", "are \"empty\" we know there is no current config session Args: N/A Returns:", "remove \"end\" at the end of config if present - if its present", "find all sections that need to be \"eagerly\" sent eager_config = re.findall(pattern=BANNER_PATTERN, string=config)", "to the device via scrapli Args: config: configuration to prep Returns: tuple: tuple", "Parse version string out of device output Args: device_output: output from show version", "\"show running-config\" return \"show startup-config\" @staticmethod def _prepare_config_payloads(config: str) -> Tuple[str, str]: \"\"\"", "if present - if its present it will drop scrapli out # of", "source config store Returns: ScrapliCfgDiff: 
scrapli cfg diff object Raises: N/A \"\"\" self.logger.debug(\"normalizing", "configuration session(s)\" self.logger.critical(msg) response.result = msg else: response.result = \"configuration session(s) cleared\" return", "is no current config session Args: N/A Returns: None Raises: N/A \"\"\" self.logger.debug(\"resetting", "Args: device_output: output from show version command Returns: str: device version string Raises:", "Handle pre \"clear_config_sessions\" operations for parity between sync and async Args: N/A Returns:", "config, eager_config, register_config_session def _reset_config_session(self) -> None: \"\"\" Reset config session info Resets", "Tuple[str, str]: \"\"\" Prepare a configuration so it can be nicely sent to", "on the provided source Args: source: name of the config source, generally running|startup", "we only remove \"global\" (top level) comments though... user comments # attached to", "self.config_session_name: self.config_session_name = f\"scrapli_cfg_{round(datetime.now().timestamp())}\" self.logger.debug(f\"configuration session name will be '{self.config_session_name}'\") register_config_session = True", "# those comment lines - we only remove \"global\" (top level) comments though...", "config based on the provided source Args: source: name of the config source,", "scrapli.response import Response from scrapli_cfg.exceptions import ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns import ( BANNER_PATTERN, END_PATTERN,", "Union from scrapli.driver import AsyncNetworkDriver, NetworkDriver from scrapli.response import Response from scrapli_cfg.exceptions import", "line) return source_config, candidate_config def _pre_clear_config_sessions(self) -> ScrapliCfgResponse: \"\"\" Handle pre \"clear_config_sessions\" operations", "the provided source Args: source: name of the config source, generally running|startup Returns:", "ScrapliCfgResponse: \"\"\" Handle pre \"clear_config_sessions\" operations for parity between sync 
and async Args:", "for parity between sync and async Args: response: response object to update scrapli_responses:", "from show version command Returns: str: device version string Raises: N/A \"\"\" version_string_search", "from scrapli.response import Response from scrapli_cfg.exceptions import ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns import ( BANNER_PATTERN,", "END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN, ) from scrapli_cfg.response import ScrapliCfgResponse CONFIG_SOURCES = [ \"running\", \"startup\",", "object to update scrapli_responses: list of scrapli response objects from fetching the version", "config session which we do not want config = re.sub(pattern=END_PATTERN, repl=\"!\", string=config) #", "scrapli_responses: list of scrapli response objects from fetching the version Returns: ScrapliCfgResponse: response", "import LoggerAdapter from typing import Iterable, List, Tuple, Union from scrapli.driver import AsyncNetworkDriver,", "Response from scrapli_cfg.exceptions import ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns import ( BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN, VERSION_PATTERN,", "Prepare the normal and eager payloads and decide if we need to register", "lastly a bool indicating if the config session needs to be registered on", "N/A \"\"\" self.logger.debug(\"normalizing source and candidate configs for diff object\") # Remove all", "import datetime from logging import LoggerAdapter from typing import Iterable, List, Tuple, Union", "def _post_clear_config_sessions( self, response: ScrapliCfgResponse, scrapli_responses: Iterable[Response], ) -> ScrapliCfgResponse: \"\"\" Handle post", "the device via scrapli Args: config: configuration to prep Returns: tuple: tuple of", "info Resets the candidate config and config session name attributes -- when these", "the normal and eager payloads and decide if we need to register a", "self.config_session_name = \"\" def 
_normalize_source_candidate_configs(self, source_config: str) -> Tuple[str, str]: \"\"\" Normalize candidate", "operations for parity between sync and async Args: response: response object to update", "return sessions @staticmethod def _get_config_command(source: str) -> str: \"\"\" Return command to use", "\"\"\"scrapli_cfg.platform.core.arista_eos.base\"\"\" import json import re from datetime import datetime from logging import LoggerAdapter", "Reset config session info Resets the candidate config and config session name attributes", "dont modify the user provided candidate config which can totally have # those", "of config if present - if its present it will drop scrapli out", "-> Tuple[str, str, bool]: \"\"\" Prepare the normal and eager payloads and decide", "and lastly a bool indicating if the config session needs to be registered", "modify the user provided candidate config which can totally have # those comment", "Returns: ScrapliCfgResponse: new response object to update w/ get results Raises: N/A \"\"\"", "can easily diff them Args: source_config: current config of the source config store", "from typing import Iterable, List, Tuple, Union from scrapli.driver import AsyncNetworkDriver, NetworkDriver from", "str) -> str: \"\"\" Parse version string out of device output Args: device_output:", "Raises: N/A \"\"\" if source == \"running\": return \"show running-config\" return \"show startup-config\"", "\"normal\" config lines and \"eager\" config lines Raises: N/A \"\"\" # remove comment", "= re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\" ) candidate_config = \"\\n\".join(line for line in candidate_config.splitlines()", "import Response from scrapli_cfg.exceptions import ScrapliCfgException from scrapli_cfg.platform.core.arista_eos.patterns import ( BANNER_PATTERN, END_PATTERN, GLOBAL_COMMENT_LINE_PATTERN,", "from show version command Returns: list[str]: config session names Raises: N/A \"\"\" try:", "them Args: 
source_config: current config of the source config store Returns: ScrapliCfgDiff: scrapli", "if the config session needs to be registered on the device Raises: N/A", "import re from datetime import datetime from logging import LoggerAdapter from typing import", "source_config.splitlines() if line) candidate_config = re.sub( pattern=GLOBAL_COMMENT_LINE_PATTERN, string=self.candidate_config, repl=\"\" ) candidate_config = \"\\n\".join(line", "parity between sync and async Args: N/A Returns: ScrapliCfgResponse: new response object to", "return \"show startup-config\" @staticmethod def _prepare_config_payloads(config: str) -> Tuple[str, str]: \"\"\" Prepare a", "scrapli out # of the config session which we do not want config", "between sync and async Args: N/A Returns: ScrapliCfgResponse: new response object to update", "pre-diff, so we dont modify the user provided candidate config which can totally", "json.JSONDecodeError: return [] sessions = list(config_session_dict.get(\"sessions\", {})) return sessions @staticmethod def _get_config_command(source: str)", "list(config_session_dict.get(\"sessions\", {})) return sessions @staticmethod def _get_config_command(source: str) -> str: \"\"\" Return command", "prep Returns: tuple: tuple of \"normal\" config lines and \"eager\" config lines Raises:", "configs -- this is only done # here pre-diff, so we dont modify", "raise_for_status_exception=ScrapliCfgException ) return response def _post_clear_config_sessions( self, response: ScrapliCfgResponse, scrapli_responses: Iterable[Response], ) ->", "scrapli response objects from fetching the version Returns: ScrapliCfgResponse: response object containing string", "\"normal\" config elements to send to the device and \"eager\" mode config elements", "\"clear_config_sessions\" operations for parity between sync and async Args: response: response object to", "decide if we need to register a config session Args: config: candidate config", "need to register a config session Args: 
config: candidate config to load Returns:", "name will be '{self.config_session_name}'\") register_config_session = True return config, eager_config, register_config_session def _reset_config_session(self)", "def _pre_clear_config_sessions(self) -> ScrapliCfgResponse: \"\"\" Handle pre \"clear_config_sessions\" operations for parity between sync", "fetching the version Returns: ScrapliCfgResponse: response object containing string of the version as", "response.record_response(scrapli_responses=scrapli_responses) if response.failed: msg = \"failed to clear device configuration session(s)\" self.logger.critical(msg) response.result" ]
[ "\"\"\"Commits a transaction, optionally creating, deleting or modifying some entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): \"\"\"Each RPC normalizes the", "google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): \"\"\"Each RPC normalizes the partition IDs of the keys in", "servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString, ), } generic_handler =", "an entity before it is inserted. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "not implemented!') raise NotImplementedError('Method not implemented!') def Commit(self, request, context): \"\"\"Commits a transaction,", "its input entities, and always returns entities with keys with normalized partition IDs.", "import utilities as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import", "request. 
\"\"\" def Lookup(self, request, context): \"\"\"Looks up entities by key. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "back a transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "an empty path and an empty or unset partition ID. Normalization of input", "channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit", "rpc_method_handlers = { 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString,", "google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as 
google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2", "implemented!') raise NotImplementedError('Method not implemented!') def Rollback(self, request, context): \"\"\"Rolls back a transaction.", "IDs. This applies to all keys and entities, including those in values, except", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def BeginTransaction(self, request, context): \"\"\"Begins", "raise NotImplementedError('Method not implemented!') def RunQuery(self, request, context): \"\"\"Queries for entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "'/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback =", "as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): \"\"\"Each", "grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': 
grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler(", "request, context): \"\"\"Looks up entities by key. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): \"\"\"Each RPC normalizes the partition IDs of the keys", "request. \"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\" self.Lookup =", "\"\"\"Queries for entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'google.datastore.v1.Datastore', rpc_method_handlers)", "NotImplementedError('Method not implemented!') def AllocateIds(self, request, context): \"\"\"Allocates IDs for the given keys,", "class DatastoreStub(object): \"\"\"Each RPC normalizes the partition IDs of the keys in its", "cardinality from grpc.framework.interfaces.face import utilities as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2", "of the keys in its input entities, and always returns entities with keys", "'Lookup': 
grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction':", "grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2", "with keys with normalized partition IDs. This applies to all keys and entities,", "'/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds =", "ID (if not already set) to the project ID from the request. \"\"\"", "context): \"\"\"Queries for entities. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", ") self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object): \"\"\"Each RPC normalizes", "NotImplementedError('Method not implemented!') def Rollback(self, request, context): \"\"\"Rolls back a transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "not implemented!') def AllocateIds(self, request, context): \"\"\"Allocates IDs for the given keys, which", "response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString,", "request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', 
request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction = channel.unary_unary(", "entities, and always returns entities with keys with normalized partition IDs. This applies", "and entities, including those in values, except keys with both an empty path", "'/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction =", "import cardinality from grpc.framework.interfaces.face import utilities as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import", "DatastoreServicer(object): \"\"\"Each RPC normalizes the partition IDs of the keys in its input", "is useful for referencing an entity before it is inserted. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "Commit(self, request, context): \"\"\"Commits a transaction, optionally creating, deleting or modifying some entities.", "input entities, and always returns entities with keys with normalized partition IDs. 
This", "grpc from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2", "to all keys and entities, including those in values, except keys with both", ") self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString,", "import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): \"\"\"Each RPC normalizes the partition IDs of", "for entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def BeginTransaction(self,", "servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction,", "a new transaction. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "\"\"\"Allocates IDs for the given keys, which is useful for referencing an entity", "context): \"\"\"Begins a new transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "= channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, )", "key. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RunQuery(self, request,", "context): \"\"\"Commits a transaction, optionally creating, deleting or modifying some entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "channel): \"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\" self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString,", "already set) to the project ID from the request. 
\"\"\" def Lookup(self, request,", "def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers = { 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery':", "to the project ID from the request. \"\"\" def __init__(self, channel): \"\"\"Constructor. Args:", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Commit(self, request, context):", "not implemented!') def BeginTransaction(self, request, context): \"\"\"Begins a new transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback',", "\"\"\" def Lookup(self, request, context): \"\"\"Looks up entities by key. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "some entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Rollback(self,", "AllocateIds(self, request, context): \"\"\"Allocates IDs for the given keys, which is useful for", "context): \"\"\"Rolls back a transaction. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "the project ID from the request. \"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel:", "add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers = { 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler(", "referencing an entity before it is inserted. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object): \"\"\"Each RPC normalizes the", "with normalized partition IDs. 
This applies to all keys and entities, including those", "self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString,", "from grpc.framework.interfaces.face import utilities as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as", "empty path and an empty or unset partition ID. Normalization of input keys", "not implemented!') raise NotImplementedError('Method not implemented!') def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers = { 'Lookup':", "the given keys, which is useful for referencing an entity before it is", "channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction", "entities. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Rollback(self, request,", "channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class", "grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler(", "\"\"\"Rolls back a transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "those in values, except keys with both an empty path and an empty", "deleting or modifying some entities. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "def BeginTransaction(self, request, context): \"\"\"Begins a new transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString,", "A grpc.Channel. \"\"\" self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery = channel.unary_unary(", "google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): \"\"\"Each RPC normalizes the partition IDs of the", "response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) 
self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit',", "def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\" self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup',", "entities, including those in values, except keys with both an empty path and", "import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): \"\"\"Each RPC normalizes", "grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': grpc.unary_unary_rpc_method_handler(", "a transaction. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def AllocateIds(self,", "def AllocateIds(self, request, context): \"\"\"Allocates IDs for the given keys, which is useful", "grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString, ), } generic_handler", "request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds = channel.unary_unary(", "given keys, which is useful for referencing an entity before it is inserted.", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def AllocateIds(self, request, context): \"\"\"Allocates", "RPC normalizes the partition IDs of the keys in its input entities, and", "as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 
as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as", "ID. Normalization of input keys sets the project ID (if not already set)", "in values, except keys with both an empty path and an empty or", "not implemented!') def Rollback(self, request, context): \"\"\"Rolls back a transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "empty or unset partition ID. Normalization of input keys sets the project ID", "NotImplementedError('Method not implemented!') def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers = { 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString,", "Args: channel: A grpc.Channel. \"\"\" self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery", "= { 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString,", "ID from the request. 
\"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel.", "or modifying some entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "utilities as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2", "= channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object): \"\"\"Each RPC normalizes the partition", "partition IDs. 
This applies to all keys and entities, including those in values,", "\"\"\" self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString,", "), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ),", "creating, deleting or modifying some entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "context): \"\"\"Looks up entities by key. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object): \"\"\"Each", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def AllocateIds(self, request, context): \"\"\"Allocates IDs", "request, context): \"\"\"Allocates IDs for the given keys, which is useful for referencing", "normalized partition IDs. This applies to all keys and entities, including those in", "implemented!') def BeginTransaction(self, request, context): \"\"\"Begins a new transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "channel: A grpc.Channel. \"\"\" self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery =", "which is useful for referencing an entity before it is inserted. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "including those in values, except keys with both an empty path and an", "response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object): \"\"\"Each RPC", "implemented!') raise NotImplementedError('Method not implemented!') def RunQuery(self, request, context): \"\"\"Queries for entities. \"\"\"", "unset partition ID. Normalization of input keys sets the project ID (if not", "transaction, optionally creating, deleting or modifying some entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ),", "'/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object): \"\"\"Each RPC normalizes the 
partition IDs of", "face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2", "set) to the project ID from the request. \"\"\" def __init__(self, channel): \"\"\"Constructor.", "it is inserted. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def AllocateIds(self, request,", "partition IDs of the keys in its input entities, and always returns entities", "request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString,", "\"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. 
\"\"\" self.Lookup = channel.unary_unary(", "raise NotImplementedError('Method not implemented!') def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers = { 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup,", "import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class", "ID from the request. \"\"\" def Lookup(self, request, context): \"\"\"Looks up entities by", "by key. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RunQuery(self,", "is inserted. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_DatastoreServicer_to_server(servicer,", "import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Rollback(self, request, context): \"\"\"Rolls back", "'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': 
grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit':", "class DatastoreServicer(object): \"\"\"Each RPC normalizes the partition IDs of the keys in its", "IDs of the keys in its input entities, and always returns entities with", "IDs for the given keys, which is useful for referencing an entity before", "request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler(", "a transaction, optionally creating, deleting or modifying some entities. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "except keys with both an empty path and an empty or unset partition", "the partition IDs of the keys in its input entities, and always returns", "), 'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ),", "context): \"\"\"Allocates IDs for the given keys, which is useful for referencing an", "RunQuery(self, request, context): \"\"\"Queries for entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "\"\"\"Each RPC normalizes the partition IDs of the keys in its input entities,", "request, context): \"\"\"Rolls back a transaction. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "keys and entities, including those in values, except keys with both an empty", "DatastoreStub(object): \"\"\"Each RPC normalizes the partition IDs of the keys in its input", "response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object): \"\"\"Each RPC normalizes the partition IDs of the keys", "not implemented!') raise NotImplementedError('Method not implemented!') def BeginTransaction(self, request, context): \"\"\"Begins a new", "response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString,", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Commit(self, request, context): \"\"\"Commits", "request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, 
request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString,", "not implemented!') raise NotImplementedError('Method not implemented!') def AllocateIds(self, request, context): \"\"\"Allocates IDs for", "def Commit(self, request, context): \"\"\"Commits a transaction, optionally creating, deleting or modifying some", "values, except keys with both an empty path and an empty or unset", "and an empty or unset partition ID. Normalization of input keys sets the", ") self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString,", "request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, 
request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString,", "response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString,", "the project ID from the request. \"\"\" def Lookup(self, request, context): \"\"\"Looks up", "NotImplementedError('Method not implemented!') def Commit(self, request, context): \"\"\"Commits a transaction, optionally creating, deleting", "Normalization of input keys sets the project ID (if not already set) to", "google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2", "as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as", "NotImplementedError('Method not implemented!') def BeginTransaction(self, request, 
context): \"\"\"Begins a new transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED)", "'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback':", "'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds':", "with both an empty path and an empty or unset partition ID. 
Normalization", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def AllocateIds(self, request, context):", "google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): \"\"\"Each RPC normalizes the partition IDs", "raise NotImplementedError('Method not implemented!') def Rollback(self, request, context): \"\"\"Rolls back a transaction. \"\"\"", "(if not already set) to the project ID from the request. \"\"\" def", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RunQuery(self, request, context):", "or unset partition ID. Normalization of input keys sets the project ID (if", "\"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\" self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, )", "grpc.Channel. 
\"\"\" self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery',", "'/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object):", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def BeginTransaction(self, request, context):", "normalizes the partition IDs of the keys in its input entities, and always", "the project ID (if not already set) to the project ID from the", "self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, 
response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString,", "implemented!') def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers = { 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ),", "an empty or unset partition ID. Normalization of input keys sets the project", "self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString,", "and always returns entities with keys with normalized partition IDs. This applies to", "optionally creating, deleting or modifying some entities. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as", "input keys sets the project ID (if not already set) to the project", "server): rpc_method_handlers = { 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery,", ") self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString,", "applies to all keys and entities, including those in values, except keys with", "the request. \"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. 
\"\"\" self.Lookup", "channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds", "set) to the project ID from the request. \"\"\" def Lookup(self, request, context):", "raise NotImplementedError('Method not implemented!') def AllocateIds(self, request, context): \"\"\"Allocates IDs for the given", "useful for referencing an entity before it is inserted. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "Lookup(self, request, context): \"\"\"Looks up entities by key. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "up entities by key. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "keys with normalized partition IDs. 
This applies to all keys and entities, including", "request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback = channel.unary_unary(", "'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'google.datastore.v1.Datastore', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))", "response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds',", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RunQuery(self, request, context): \"\"\"Queries", "sets the project ID (if not already set) to the project ID from", 
"response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString,", "raise NotImplementedError('Method not implemented!') def BeginTransaction(self, request, context): \"\"\"Begins a new transaction. \"\"\"", "response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'google.datastore.v1.Datastore',", "def RunQuery(self, request, context): \"\"\"Queries for entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "not already set) to the project ID from the request. \"\"\" def __init__(self,", "already set) to the project ID from the request. 
\"\"\" def __init__(self, channel):", "= channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, )", "not implemented!') raise NotImplementedError('Method not implemented!') def Rollback(self, request, context): \"\"\"Rolls back a", "keys with both an empty path and an empty or unset partition ID.", "entities by key. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "for referencing an entity before it is inserted. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object): \"\"\"Each RPC normalizes the partition IDs", "channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback", "always returns entities with keys with normalized partition IDs. This applies to all", ") self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, ) self.AllocateIds = channel.unary_unary( '/google.datastore.v1.Datastore/AllocateIds', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString,", "new transaction. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Commit(self,", "not already set) to the project ID from the request. \"\"\" def Lookup(self,", "raise NotImplementedError('Method not implemented!') def Commit(self, request, context): \"\"\"Commits a transaction, optionally creating,", "implemented!') raise NotImplementedError('Method not implemented!') def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers = { 'Lookup': grpc.unary_unary_rpc_method_handler(", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers = {", "request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.FromString, ) class DatastoreServicer(object): \"\"\"Each RPC normalizes the partition IDs of the", ") class DatastoreServicer(object): \"\"\"Each RPC normalizes the partition IDs of the keys in", "project ID from the request. \"\"\" def __init__(self, channel): \"\"\"Constructor. 
Args: channel: A", "in its input entities, and always returns entities with keys with normalized partition", "'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString, ), }", "__init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. \"\"\" self.Lookup = channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString,", "before it is inserted. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "implemented!') raise NotImplementedError('Method not implemented!') def BeginTransaction(self, request, context): \"\"\"Begins a new transaction.", "google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object):", "= channel.unary_unary( '/google.datastore.v1.Datastore/Lookup', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, )", "implemented!') raise NotImplementedError('Method not implemented!') def AllocateIds(self, request, context): \"\"\"Allocates IDs for the", "'/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, 
response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit =", "keys sets the project ID (if not already set) to the project ID", "keys in its input entities, and always returns entities with keys with normalized", "modifying some entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def", "project ID from the request. \"\"\" def Lookup(self, request, context): \"\"\"Looks up entities", "to the project ID from the request. \"\"\" def Lookup(self, request, context): \"\"\"Looks", "response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.FromString, ) self.RunQuery = channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction',", "transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Commit(self, request,", "keys, which is useful for referencing an entity before it is inserted. \"\"\"", "request, context): \"\"\"Queries for entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "def Lookup(self, request, context): \"\"\"Looks up entities by key. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "\"\"\"Looks up entities by key. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "for the given keys, which is useful for referencing an entity before it", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def RunQuery(self, request, context): \"\"\"Queries for", "request, context): \"\"\"Begins a new transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method", "not implemented!') def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers = { 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString,", "project ID (if not already set) to the project ID from the request.", "servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit,", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers", "= channel.unary_unary( '/google.datastore.v1.Datastore/Commit', 
request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString, ) self.Rollback = channel.unary_unary( '/google.datastore.v1.Datastore/Rollback', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.FromString, )", "\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Rollback(self, request, context):", "implemented!') def Commit(self, request, context): \"\"\"Commits a transaction, optionally creating, deleting or modifying", "all keys and entities, including those in values, except keys with both an", "the request. \"\"\" def Lookup(self, request, context): \"\"\"Looks up entities by key. 
\"\"\"", "request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': grpc.unary_unary_rpc_method_handler( servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString,", "), 'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.AllocateIdsResponse.SerializeToString, ),", "as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): \"\"\"Each RPC normalizes the partition", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def BeginTransaction(self, request, context): \"\"\"Begins a", "from the request. \"\"\" def __init__(self, channel): \"\"\"Constructor. Args: channel: A grpc.Channel. 
\"\"\"", "implemented!') raise NotImplementedError('Method not implemented!') def Commit(self, request, context): \"\"\"Commits a transaction, optionally", "servicer.BeginTransaction, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.SerializeToString, ), 'Commit': grpc.unary_unary_rpc_method_handler( servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback,", "both an empty path and an empty or unset partition ID. Normalization of", "from the request. \"\"\" def Lookup(self, request, context): \"\"\"Looks up entities by key.", "returns entities with keys with normalized partition IDs. 
This applies to all keys", "servicer.Commit, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.SerializeToString, ), 'Rollback': grpc.unary_unary_rpc_method_handler( servicer.Rollback, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RollbackResponse.SerializeToString, ), 'AllocateIds': grpc.unary_unary_rpc_method_handler( servicer.AllocateIds,", "self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit = channel.unary_unary( '/google.datastore.v1.Datastore/Commit', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.CommitResponse.FromString,", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_DatastoreServicer_to_server(servicer, server): rpc_method_handlers =", "google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 class DatastoreStub(object): \"\"\"Each RPC", "<reponame>isaiah-solo/Droptalk<gh_stars>0 import grpc from grpc.framework.common import 
cardinality from grpc.framework.interfaces.face import utilities as face_utilities", "import grpc from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities import", "context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Commit(self, request, context): \"\"\"Commits a", "partition ID. Normalization of input keys sets the project ID (if not already", "inserted. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_DatastoreServicer_to_server(servicer, server):", "NotImplementedError('Method not implemented!') def RunQuery(self, request, context): \"\"\"Queries for entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method", "= channel.unary_unary( '/google.datastore.v1.Datastore/RunQuery', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, )", "of input keys sets the project ID (if not already set) to the", "implemented!') def AllocateIds(self, request, context): \"\"\"Allocates IDs for the given keys, which is", "context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Rollback(self, request, context): \"\"\"Rolls", "entity before it is inserted. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not", "path and an empty or unset partition ID. Normalization of input keys sets", "BeginTransaction(self, request, context): \"\"\"Begins a new transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "Rollback(self, request, context): \"\"\"Rolls back a transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise", "request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.FromString, ) self.BeginTransaction = channel.unary_unary( '/google.datastore.v1.Datastore/BeginTransaction', request_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.BeginTransactionResponse.FromString, ) self.Commit = channel.unary_unary(", "def Rollback(self, request, context): \"\"\"Rolls back a transaction. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ), 'BeginTransaction': grpc.unary_unary_rpc_method_handler(", "not implemented!') raise NotImplementedError('Method not implemented!') def RunQuery(self, request, context): \"\"\"Queries for entities.", "This applies to all keys and entities, including those in values, except keys", "grpc.framework.interfaces.face import utilities as face_utilities import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2 import google.cloud.grpc.datastore.v1.datastore_pb2 as google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2", "entities with keys with normalized partition IDs. This applies to all keys and", "implemented!') def Rollback(self, request, context): \"\"\"Rolls back a transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "not implemented!') def RunQuery(self, request, context): \"\"\"Queries for entities. 
\"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not", "{ 'Lookup': grpc.unary_unary_rpc_method_handler( servicer.Lookup, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.LookupResponse.SerializeToString, ), 'RunQuery': grpc.unary_unary_rpc_method_handler( servicer.RunQuery, request_deserializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryRequest.FromString, response_serializer=google_dot_cloud_dot_grpc_dot_datastore_dot_v1_dot_datastore__pb2.RunQueryResponse.SerializeToString, ),", "not implemented!') def Commit(self, request, context): \"\"\"Commits a transaction, optionally creating, deleting or", "implemented!') def RunQuery(self, request, context): \"\"\"Queries for entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!')", "request, context): \"\"\"Commits a transaction, optionally creating, deleting or modifying some entities. \"\"\"", "\"\"\"Begins a new transaction. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')", "entities. \"\"\" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def BeginTransaction(self, request,", "the keys in its input entities, and always returns entities with keys with" ]
[ "`nikola ping` from the `ping` # plugin (`nikola plugin -i ping`). Or run", "latest featured post in a large box, with the previewimage as its background.", "is the default language? DEFAULT_LANG = \"en\" # What other languages do you", "MAX_IMAGE_SIZE # options, but will have to be referenced manually to be visible", "sidebar / navigation bar. (translatable) # This is a dict. The keys are", "a preset # named `default` will be executed. You can use as many", "are tuples. NAVIGATION_LINKS = { DEFAULT_LANG: ( (\"/archive.html\", \"Archive\"), (\"/categories/\", \"Tags\"), (\"/rss.xml\", \"RSS", "the tags 'draft', 'mathjax' and 'private' have special # meaning. If set to", "would conflict # with many of the others. # \"pandoc\": ['.rst', '.md', '.txt'],", "instead /foo/default.html => /foo) STRIP_INDEXES = False # List of files relative to", "that is, say, default.html, # it will instead /foo/default.html => /foo) STRIP_INDEXES =", "the input from the source filename # but is disabled by default as", "for your site. It will be used # in a prominent link. Don't", "bootstrap4 (commented) follow. # bootblog4 supports: featured_large featured_small featured_on_mobile # featured_large_image_on_mobile featured_strip_html sidebar", "# in a `nikola deploy` command as you like. # DEPLOY_COMMANDS = {", "<li><a class=\"addthis_button_linkedin\"></a> # <li><a class=\"addthis_button_twitter\"></a> # </ul> # </div> # <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> #", "-rav --delete output/ joe@my.site:/srv/www/site\", # ] # } # github_deploy configuration # For", "a complete URL (that includes the SITE_URL) # URL_TYPE = 'rel_path' # #", "demoted by that much (1 → h1 will become h2 and so on)", "HTML that displayed on “main” blog index.html files. 
# May be used for", "for RSS feed files # RSS_EXTENSION = \".xml\" # RSS filename base (without", "'images'} # IMAGE_THUMBNAIL_SIZE = 400 # IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}' # ############################################################################# # HTML", "You can define multiple presets and specify them as arguments # to `nikola", "['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ], # displayMath: [ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ], # processEscapes: true #", "!! # # !! You should edit it to your liking. !! #", "# as an accent color (the default ones don’t). Must be a HEX", "post dates. (translatable) # Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time # You can", "{'.MathJax_Display': {\"margin\": 0}} # } # }); # </script> # \"\"\" # Want", "= using LUXON_DATE_FORMAT and local user time (JS, using Luxon) # 2 =", "one # author, author pages are generated. ENABLE_AUTHOR_PAGES = False # If you", "True, 'format': 'DATE_FULL'} # LUXON_DATE_FORMAT = { # DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd", "Nikola plugins # repository. # EXTRA_PLUGINS_DIRS = [] # Add the absolute paths", "same way NAVIGATION_LINKS does, # although themes may not always support them. (translatable)", "if SITE_URL points to server root. The list is used to exclude resources", "############################################################################# # 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)", "HTML from featured post text. 'featured_strip_html': False, # Contents of the sidebar, If", "dates are considered in UTC by default, if you want to use #", "can be anything, data, functions, modules, etc. GLOBAL_CONTEXT = {} # Add functions", "= [] # List of regular expressions, links matching them will always be", "of 'full_path' # URL_TYPE = 'full_path' # Extension for RSS feed files #", "the sidebar is not displayed. 'sidebar': '' } } # POSTS and PAGES", "for that directory. 
# PAGE_INDEX = False # Enable comments on pages (i.e.", "display an author publicly, you can mark it as hidden. # The author", "Extension for RSS feed files # RSS_EXTENSION = \".xml\" # RSS filename base", "(which may conflict with running # text!), just use this config: # MATHJAX_CONFIG", "tags. USE_TAG_METADATA = False # If set to True, a warning is issued", "# but a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT). IMAGE_FOLDERS =", "\"\" # If you want support for the $.$ syntax (which may conflict", "= 'cache' # ############################################################################# # Image Gallery Options # ############################################################################# # Use a", "HIDDEN_CATEGORIES = [] # If ENABLE_AUTHOR_PAGES is set to True and there is", "# Copy the source files for your pages? # Setting it to False", "SHOW_SOURCELINK = False # Copy the source files for your pages? # Setting", "be deployed. # If not set, defaults to SITE_URL # BASE_URL = \"https://example.com/\"", "End of social buttons --> # \"\"\" # Show link to source for", "your COMMENT_SYSTEM_ID which # depends on what comment system you use. The default", "tag will not be displayed on the tag list page and posts. #", "an accent color (the default ones don’t). Must be a HEX value. THEME_COLOR", "the browser UI color (eg. Chrome on Android). Other themes might also use", "# before deploying. GITHUB_COMMIT_SOURCE = True # Where the output site should be", "EXTRA_HEAD_DATA = \"\" # Google Analytics or whatever else you use. Added to", "another time zone, please set TIMEZONE to match. Check the available # list", "ATOM_EXTENSION = \".atom\" # A list of redirection tuples, [(\"foo/from.html\", \"/bar/to.html\")]. # #", ") } # A simple copyright tag for inclusion in RSS feeds that", "UTC by default, if you want to use # another time zone, please", "`nikola deploy` command as you like. # DEPLOY_COMMANDS = { # 'default': [", "as hidden. 
# The tag will not be displayed on the tag list", "# before </head> # (translatable) # EXTRA_HEAD_DATA = \"\" # Google Analytics or", "(reST/Markdown) # will be demoted by that much (1 → h1 will become", "configuration of Nikola. !! # # !! You should edit it to your", ") # Below this point, everything is optional # Post's dates are considered", "\"<EMAIL>\" BLOG_DESCRIPTION = \"This is a demo site for Nikola.\" # (translatable) #", "be anything, for # example, you may use rsync: # \"rsync -rav --delete", "theme to use. #THEME = \"bootblog4\" THEME = \"disimplex\" # A theme color.", "= [\"render_galleries\"] # Special settings to disable only parts of the indexes plugin.", "False # DISABLE_MAIN_RSS_FEED = False # Add the absolute paths to directories containing", "the pages HEAD tag. This will be added right # before </head> #", "\"pandoc\": ['.rst', '.md', '.txt'], } # Preferred metadata format for new posts #", "but don't add any new # compilers unless you write the interface for", "url GALLERIES_DEFAULT_THUMBNAIL = None # Images will be scaled down according to IMAGE_THUMBNAIL_SIZE", "here (new_post -s) # Specify an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE =", "SITE_URL # BASE_URL = \"https://example.com/\" BLOG_EMAIL = \"<EMAIL>\" BLOG_DESCRIPTION = \"This is a", "and galleries. # (translatable) # RSS_FILENAME_BASE = \"rss\" # Atom filename base (without", "push to, using github_deploy. GITHUB_REMOTE_NAME = 'origin' # Whether or not github_deploy should", "http://cldr.unicode.org/translation/date-time-1/date-time # You can also use 'full', 'long', 'medium', or 'short' # DATE_FORMAT", "(\"/archive.html\", \"Archive\"), (\"/categories/\", \"Tags\"), (\"/rss.xml\", \"RSS feed\"), ), } # Alternative navigation links.", "Wikipedia: TIMEZONE = \"Europe/London\" # Date format used to display post dates. (translatable)", "you do not want to display a tag publicly, you can mark it", "protocol (http/https)! 
SITE_URL = \"https://example.com/\" # This is the URL where Nikola's output", "KaTeX instead of MathJax? While KaTeX may not support every # feature yet,", "# FUTURE_IS_NOW = False # If True, future dated posts are allowed in", "describing the license, for the sidebar. # (translatable) LICENSE = \"\" # I", "called with template # GLOBAL_CONTEXT as parameter when the template is about to", "# May be used for a greeting. (translatable) FRONT_INDEX_HEADER = { DEFAULT_LANG: ''", "/ partials # REQUIRES the use of 'full_path' # URL_TYPE = 'full_path' #", "'private' tags are found in a post. Useful for checking that # migration", "# Which means process listings from 'listings' into 'output/listings' # A mapping of", "No web server configuration is required. Also enables STRIP_INDEXES. # This can be", "any new # compilers unless you write the interface for it yourself. #", "theme color. In default themes, it might be displayed by some browsers as", "# Special settings to disable only parts of the indexes plugin. # Use", "will still be generated. HIDDEN_CATEGORIES = [] # If ENABLE_AUTHOR_PAGES is set to", "mark it as hidden. # The tag will not be displayed on the", "Image Gallery Options # ############################################################################# # Use a thumbnail (defined by \".. previewimage:\"", "on “main” blog index.html files. # May be used for a greeting. (translatable)", "# Data about this site BLOG_AUTHOR = \"<NAME>\" # (translatable) BLOG_TITLE = \"My", "before the file extension by default, # but a different naming template can", "'short' # DATE_FORMAT = 'yyyy-MM-dd HH:mm' # Date format used to display post", "root. The list is used to exclude resources from # /robots.txt and /sitemap.xml,", "background. 
'featured_large': False, # Show the first (remaining) two featured posts in small", "'Contents &copy; {date} <a href=\"mailto:{email}\">{author}</a> - Powered by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}' #", "depends on what comment system you use. The default is # \"nikolademo\" which", "`new_post` is the first entry in the POSTS tuple. # # 'rest' is", "dates are used. (translatable) # Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting # Example for presets:", "relative URL. # # If you don't need any of these, just set", "addthis_label_style addthis_32x32_style\"> # <a class=\"addthis_button_more\">Share</a> # <ul><li><a class=\"addthis_button_facebook\"></a> # <li><a class=\"addthis_button_google_plusone_share\"></a> # <li><a", "on what comment system you use. The default is # \"nikolademo\" which is", "into 'output/listings' # A mapping of languages to file-extensions that represent that language.", "will have to be referenced manually to be visible on the site #", "works just # like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT = 'Contents © {date} <a", "you can mark it as hidden. # The author will not be displayed", "is in the manual. COMMENT_SYSTEM_ID = \"\" # Create index.html for page folders?", "[ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ], # displayMath: [ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ], # processEscapes: true", "location of conf.py # OUTPUT_FOLDER = 'output' # where the \"cache\" of partial", "# {left: \"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\", display: true}, # {left: \"$\", right: \"$\", display:", "template # GLOBAL_CONTEXT as parameter when the template is about to be #", "# You may also want to use github_deploy (see below). # You can", "to be # rendered GLOBAL_CONTEXT_FILLER = [] # Settings for the (boot)Reveal theme", "# You can leave this option blank to disable comments. 
COMMENT_SYSTEM = \"\"", "'origin' # Whether or not github_deploy should commit to the source branch automatically", "that don't have one # None: show a grey square # '/url/to/file': show", "is used to exclude resources from # /robots.txt and /sitemap.xml, and to inform", "False # If you do not want to display an author publicly, you", "a tag publicly, you can mark it as hidden. # The tag will", "is issued if one of the 'draft', 'mathjax' # and 'private' tags are", "# It can be anything, data, functions, modules, etc. GLOBAL_CONTEXT = {} #", "URL_TYPE = 'full_path' # Extension for RSS feed files # RSS_EXTENSION = \".xml\"", "index.html # Common other alternatives: default.html for IIS, index.php # INDEX_FILE = \"index.html\"", "don't add any new # compilers unless you write the interface for it", "using Luxon) # 2 = using a string like “2 days ago” (JS,", "{ # inlineMath: [ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ], # displayMath: [ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ],", "modules, etc. GLOBAL_CONTEXT = {} # Add functions here and they will be", "DATE_FORMAT and TIMEZONE (without JS) # 1 = using LUXON_DATE_FORMAT and local user", "is set to True, the output written to output # contains only the", "can be configured with IMAGE_THUMBNAIL_FORMAT). IMAGE_FOLDERS = {'images': 'images'} # IMAGE_THUMBNAIL_SIZE = 400", "False # Do you want to add a Mathjax config file? # MATHJAX_CONFIG", ";-) # Note: most Nikola-specific extensions are done via the Nikola plugin system,", "will be created in output/foo/from.html that redirects # to the \"/bar/to.html\" URL. notice", "Note that our use of \"server side includes\" / partials # REQUIRES the", "be located # If you don't use an absolute path, it will be", "featured posts in small boxes. 'featured_small': False, # Show featured posts on mobile.", "'html' assumes the file is HTML and just copies it COMPILERS = {", "the defaults. # Consult your engine's documentation on filters if you need help", "default ones don’t). 
Must be a HEX value. THEME_COLOR = '#5670d4' # Theme", "'featured_large_image_on_mobile': True, # Strip HTML from featured post text. 'featured_strip_html': False, # Contents", "includes the SITE_URL) # URL_TYPE = 'rel_path' # # Note that our use", "is, say, default.html, # it will instead /foo/default.html => /foo) STRIP_INDEXES = False", "It can be anything, data, functions, modules, etc. GLOBAL_CONTEXT = {} # Add", "in the pages HEAD tag. This will be added right # before </head>", "default, # but a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT). IMAGE_FOLDERS", "post. Useful for checking that # migration was successful. WARN_ABOUT_TAG_METADATA = False #", "language? DEFAULT_LANG = \"en\" # What other languages do you have? # The", "dated posts right away instead of scheduling them. # Defaults to False. #", "Things that will be passed to CONTENT_FOOTER.format(). This is done CONTENT_FOOTER_FORMATS = {", "the default language? DEFAULT_LANG = \"en\" # What other languages do you have?", "that much (1 → h1 will become h2 and so on) # This", "link ends in /index.html, drop the index.html part. # http://mysite/foo/bar/index.html => http://mysite/foo/bar/ #", "extensions are done via the Nikola plugin system, # with the MarkdownExtension class", "# feature yet, it's faster and the output looks better. # USE_KATEX =", "before deploying. GITHUB_COMMIT_SOURCE = True # Where the output site should be located", "(\"posts/*.txt\", \"posts\", \"post.tmpl\"), (\"posts/*.html\", \"posts\", \"post.tmpl\"), ) PAGES = ( (\"pages/*.rst\", \"\", \"page.tmpl\"),", "For a post, the whole path in the hierarchy must be specified, #", "much (1 → h1 will become h2 and so on) # This was", "SITE_URL) # URL_TYPE = 'rel_path' # # Note that our use of \"server", "that will be asked to be excluded # from indexing and other robotic", "added to the global context. 
# subtheme selection: beige/serif/simple/sky/night/default # transition selection: cube/page/concave/linear/none/default", "{'en': 'en_GB'} # LOCALES = {} # One or more folders containing files", "instead of scheduling them. # Defaults to False. # FUTURE_IS_NOW = False #", "post dates, if local dates are used. (translatable) # Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting", "author pages are generated. ENABLE_AUTHOR_PAGES = False # If you do not want", "check -l`. # You may also want to use github_deploy (see below). #", "URL_TYPE = 'rel_path' # # Note that our use of \"server side includes\"", "blog index.html files. # May be used for a greeting. (translatable) FRONT_INDEX_HEADER =", "published/deployed; not in indexes/sitemap # Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be", "use as many presets # in a `nikola deploy` command as you like.", "this config: # KATEX_AUTO_RENDER = \"\"\" # delimiters: [ # {left: \"$$\", right:", "those galleries that don't have one # None: show a grey square #", "FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value. # DEPLOY_FUTURE = False #", "[\"render_galleries\"] # Special settings to disable only parts of the indexes plugin. #", "list page. # Category pages will still be generated. HIDDEN_CATEGORIES = [] #", "URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced # option", "SITE_URL = \"https://example.com/\" # This is the URL where Nikola's output will be", "check -l\" # LINK_CHECK_WHITELIST = [] # The <hN> tags in HTML generated", "ones don’t). Must be a HEX value. THEME_COLOR = '#5670d4' # Theme configuration.", "located # default: 'cache' # CACHE_FOLDER = 'cache' # ############################################################################# # Image Gallery", "are found in a post. 
Useful for checking that # migration was successful.", "[\"\\\\\\(\",\"\\\\\\)\"] ], # displayMath: [ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ], # processEscapes: true # },", "the gallery's index) in # list of galleries for each gallery GALLERIES_USE_THUMBNAIL =", "False # If you do not want to display a category publicly, you", "https://getnikola.com/handbook.html#deploying-to-github # You will need to configure the deployment branch on GitHub. GITHUB_SOURCE_BRANCH", "# src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A small copyright notice for the page footer (in HTML).", "is the URL where Nikola's output will be deployed. # If not set,", "image in that url GALLERIES_DEFAULT_THUMBNAIL = None # Images will be scaled down", "DEPLOY_FUTURE = False # If False, draft posts will not be deployed #", "(enabled) and bootstrap4 (commented) follow. # bootblog4 supports: featured_large featured_small featured_on_mobile # featured_large_image_on_mobile", "right: \"\\\\\\\\)\", display: false} # ] # \"\"\" # What Markdown extensions to", "may also want to use github_deploy (see below). # You can define multiple", "# BASE_URL = \"https://example.com/\" BLOG_EMAIL = \"<EMAIL>\" BLOG_DESCRIPTION = \"This is a demo", "http://mysite/foo/bar/ # (Uses the INDEX_FILE setting, so if that is, say, default.html, #", "the indexes plugin. # Use with care. # DISABLE_INDEXES = False # DISABLE_MAIN_ATOM_FEED", "ends in /index.html, drop the index.html part. # http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses", "destination}. # Default is: # LISTINGS_FOLDERS = {'listings': 'listings'} # Which means process", "language. # Feel free to add or delete extensions to any list, but", "use them. # For example, the `plugins` directory of your clone of the", "set to [] REDIRECTIONS = [] # Presets of commands to execute to", "default.html, # it will instead /foo/default.html => /foo) STRIP_INDEXES = False # List", "titles are in <h1> tags too, for # example. 
# (defaults to 1.)", "If you want support for the $.$ syntax (which may # conflict with", "# to the metadata. PRETTY_URLS = False # If True, publish future dated", "global_context things you want available on all your templates. # It can be", "URL where Nikola's output will be deployed. # If not set, defaults to", "and the output looks better. # USE_KATEX = False # KaTeX auto-render settings.", "issued if one of the 'draft', 'mathjax' # and 'private' tags are found", "to use them. # For example, the `v7` directory of your clone of", "text. 'featured_strip_html': False, # Contents of the sidebar, If empty, the sidebar is", "( (\"pages/*.rst\", \"\", \"page.tmpl\"), (\"pages/*.md\", \"\", \"page.tmpl\"), (\"pages/*.txt\", \"\", \"page.tmpl\"), (\"pages/*.html\", \"\", \"page.tmpl\"),", "the category list page. # Category pages will still be generated. HIDDEN_CATEGORIES =", "the templates # ############################################################################# # 'Read more...' for the index page, if INDEX_TEASERS", "False # If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output", "# If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in #", "-*- import time # !! This is the configuration of Nikola. !! #", "site. It will be used # in a prominent link. Don't forget the", "Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time # You can also use 'full', 'long',", "True # Where the output site should be located # If you don't", "the rule specified here (new_post -s) # Specify an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html", "not be deployed # DEPLOY_DRAFTS = True # Allows scheduling of posts using", "running text!), just use this config: # KATEX_AUTO_RENDER = \"\"\" # delimiters: [", "# # Your theme must support it, Bootstrap already does. # DATE_FANCINESS =", "# List of files relative to the server root (!) 
that will be", "to True, categories can be organized in # hierarchies. For a post, the", "markdown extensions (See https://python-markdown.github.io/reference/) # Default is {} (no config at all) #", "}); # </script> # \"\"\" # Want to use KaTeX instead of MathJax?", "the sidebar / navigation bar. (translatable) # This is a dict. The keys", "Defaults to False. # FUTURE_IS_NOW = False # If True, future dated posts", "name of the remote where you wish to push to, using github_deploy. GITHUB_REMOTE_NAME", "indexes/sitemap # Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.", "['Guest'] # Optional HTML that displayed on “main” blog index.html files. # May", "# tex2jax: { # inlineMath: [ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ], # displayMath: [ ['$$','$$'],", "to source for the posts? SHOW_SOURCELINK = False # Copy the source files", "your clone of the Nikola plugins # repository. # EXTRA_PLUGINS_DIRS = [] #", "forward slash ('/') to separate paths. Use a backslash ('\\') to escape #", "executed. You can use as many presets # in a `nikola deploy` command", "not displayed. 'sidebar': '' } } # POSTS and PAGES contains (wildcard, destination,", "'markdown.extensions.toc'] # Options to be passed to markdown extensions (See https://python-markdown.github.io/reference/) # Default", "mobile. 'featured_on_mobile': True, # Show image in `featured_large` on mobile. # `featured_small` displays", "here, or even make it empty (which is # the default right now)", "tuples. # (translatable) # POSTS = ( (\"posts/*.rst\", \"posts\", \"post.tmpl\"), (\"posts/*.md\", \"posts\", \"post.tmpl\"),", "a URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds. 
Advanced #", "example, you may use rsync: # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\" # And", "IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}' # ############################################################################# # HTML fragments and diverse things that are", "\"wiki\": ['.wiki'], \"ipynb\": ['.ipynb'], \"html\": ['.html', '.htm'], # PHP files are rendered the", "them. # For example, the `plugins` directory of your clone of the Nikola", "Consult your engine's documentation on filters if you need help defining # those.", "be passed to CONTENT_FOOTER.format(). This is done CONTENT_FOOTER_FORMATS = { DEFAULT_LANG: ( (),", "# if SITE_URL points to server root. The list is used to exclude", "default language? DEFAULT_LANG = \"en\" # What other languages do you have? #", "Don't forget the protocol (http/https)! SITE_URL = \"https://example.com/\" # This is the URL", "# <img alt=\"Creative Commons License BY-NC-SA\" # style=\"border-width:0; margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A", "of commands to execute to deploy. Can be anything, for # example, you", "posts will not be deployed # DEPLOY_DRAFTS = True # Allows scheduling of", "for new posts # \"YAML\": YAML wrapped in \"---\" METADATA_FORMAT = \"YAML\" #", "''} # Which means copy 'files' into 'output' # One or more folders", "looks better. # USE_KATEX = False # KaTeX auto-render settings. If you want", "although themes may not always support them. (translatable) # (Bootstrap 4: right-side of", "= \"\"\" # delimiters: [ # {left: \"$$\", right: \"$$\", display: true}, #", "= False # Enable comments on picture gallery pages? # COMMENTS_IN_GALLERIES = False", "# Pandoc detects the input from the source filename # but is disabled", "the index.html part. 
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses the INDEX_FILE setting, so", "will instead /foo/default.html => /foo) STRIP_INDEXES = False # List of files relative", "paths to directories containing plugins to use them. # For example, the `plugins`", "according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE # options, but will have to be referenced", "= {'listings': 'listings'} # Which means process listings from 'listings' into 'output/listings' #", "always support them. (translatable) # (Bootstrap 4: right-side of navbar, Bootblog 4: right", "for each gallery GALLERIES_USE_THUMBNAIL = False # Image to use as thumbnail for", "Config for bootblog4: THEME_CONFIG = { DEFAULT_LANG: { # Show the latest featured", "# Note that our use of \"server side includes\" / partials # REQUIRES", "display post dates, if local dates are used. (translatable) # Used by Luxon:", "# style=\"border-width:0; margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A small copyright notice for the page", "/robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml. # ROBOTS_EXCLUSIONS =", "# FILES_FOLDERS = {'files': ''} # Which means copy 'files' into 'output' #", "\"\"\" # Show link to source for the posts? SHOW_SOURCELINK = False #", "do not want to display a category publicly, you can mark it as", "False) # navbar_custom_bg (defaults to '') # Config for bootblog4: THEME_CONFIG = {", "= \"\" # Bundle JS and CSS into single files to make site", "files have .php extensions, making it possible to run # them without reconfiguring", "# caused by setting slug to `index`), the PAGE_INDEX # will not be", "directory. # PAGE_INDEX = False # Enable comments on pages (i.e. not posts)?", "site loading faster in a HTTP/1.1 # environment but is not recommended for", "configuration is required. Also enables STRIP_INDEXES. # This can be disabled on a", "USE_KATEX = False # KaTeX auto-render settings. 
If you want support for the", "will become h2 and so on) # This was a hidden feature of", "(\"pages/*.txt\", \"\", \"page.tmpl\"), (\"pages/*.html\", \"\", \"page.tmpl\"), ) # Below this point, everything is", "with running # text!), just use this config: # MATHJAX_CONFIG = \"\"\" #", "to use. #THEME = \"bootblog4\" THEME = \"disimplex\" # A theme color. In", "want available on all your templates. # It can be anything, data, functions,", "large box, with the previewimage as its background. 'featured_large': False, # Show the", "to separate paths. Use a backslash ('\\') to escape # a forward slash", "# Image Gallery Options # ############################################################################# # Use a thumbnail (defined by \"..", "sidebar, If empty, the sidebar is not displayed. 'sidebar': '' } } #", "systems are supported by Nikola: # disqus, facebook, intensedebate, isso, muut, commento, utterances", "for HTTP/2.0 when caching is used. # Defaults to True. # USE_BUNDLES =", "want to use. Be careful :-) # DISABLED_PLUGINS = [\"render_galleries\"] # Special settings", "(defaults to '') # Config for bootblog4: THEME_CONFIG = { DEFAULT_LANG: { #", "or whatever else you use. Added to the bottom of <body> # in", "about this site BLOG_AUTHOR = \"<NAME>\" # (translatable) BLOG_TITLE = \"My Nikola Site\"", "The default compiler for `new_post` is the first entry in the POSTS tuple.", "# </div> # <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!-- End of social buttons --> #", "# This is a dict. 
The keys are languages, and values are tuples.", "'featured_strip_html': False, # Contents of the sidebar, If empty, the sidebar is not", "# This can be disabled on a per-page/post basis by adding # ..", "# inlineMath: [ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ], # displayMath: [ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ], #", "if a page would conflict with the index file (usually # caused by", "use the scheduling rule to all posts (not pages!) by default # SCHEDULE_ALL", "(translatable) # SOCIAL_BUTTONS_CODE = \"\"\" # <!-- Social buttons --> # <div id=\"addthisbox\"", "['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ], # processEscapes: true # }, # displayAlign: 'center', // Change", "if local dates are used. (translatable) # Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting # Example", "# Enable comments on pages (i.e. not posts)? # COMMENTS_IN_PAGES = False #", "written to output # contains only the name of the leaf category and", "# Theme configuration. Fully theme-dependent. (translatable) # Samples for bootblog4 (enabled) and bootstrap4", "RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}' RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS # To use", "# Show link to source for the posts? SHOW_SOURCELINK = False # Copy", "for the feeds, if FEED_TEASERS is True (translatable) FEED_READ_MORE_LINK = '<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>'", "folders? # WARNING: if a page would conflict with the index file (usually", "DEFAULT_LANG: '' } # URLs to other posts/pages can take 3 forms: #", "contains (wildcard, destination, template) tuples. # (translatable) # POSTS = ( (\"posts/*.rst\", \"posts\",", "same value. # DEPLOY_FUTURE = False # If False, draft posts will not", "auto-render settings. If you want support for the $.$ syntax (which may #", "become h2 and so on) # This was a hidden feature of the", "# You will need to configure the deployment branch on GitHub. 
GITHUB_SOURCE_BRANCH =", "# (translatable) CONTENT_FOOTER = 'Contents &copy; {date} <a href=\"mailto:{email}\">{author}</a> - Powered by <a", "one of the 'draft', 'mathjax' # and 'private' tags are found in a", "PHP files are rendered the usual way (i.e. with the full templates). #", "The default is # \"nikolademo\" which is a test account for Disqus. More", "of the others. # \"pandoc\": ['.rst', '.md', '.txt'], } # Preferred metadata format", "Creative Commons' wizard: # https://creativecommons.org/choose/ # LICENSE = \"\"\" # <a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\">", "mark it as hidden. # The category will not be displayed on the", "tags are handled like regular tags. USE_TAG_METADATA = False # If set to", "\"textile\": ['.textile'], \"txt2tags\": ['.t2t'], \"bbcode\": ['.bb'], \"wiki\": ['.wiki'], \"ipynb\": ['.ipynb'], \"html\": ['.html', '.htm'],", "be visible on the site # (the thumbnail has ``.thumbnail`` added before the", "supports: featured_large featured_small featured_on_mobile # featured_large_image_on_mobile featured_strip_html sidebar # bootstrap4 supports: navbar_light (defaults", "be used for a greeting. (translatable) FRONT_INDEX_HEADER = { DEFAULT_LANG: '' } #", "be added here. # Defaults are markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta is required for Markdown", "is a path specifying the # subcategory called '\\' of the top-level category", "anything, data, functions, modules, etc. GLOBAL_CONTEXT = {} # Add functions here and", "can also use 'full', 'long', 'medium', or 'short' # DATE_FORMAT = 'yyyy-MM-dd HH:mm'", "a URL with the full path from the root # absolute: a complete", "Commons License BY-NC-SA\" # style=\"border-width:0; margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A small copyright notice", "dates. 
(translatable) # Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time # You can also", "be asked to be excluded # from indexing and other robotic spidering. *", "use. Be careful :-) # DISABLED_PLUGINS = [\"render_galleries\"] # Special settings to disable", "data, functions, modules, etc. GLOBAL_CONTEXT = {} # Add functions here and they", "“main” blog index.html files. # May be used for a greeting. (translatable) FRONT_INDEX_HEADER", "Markdown and reST compilers in the # past. Useful especially if your post", "# list of galleries for each gallery GALLERIES_USE_THUMBNAIL = False # Image to", "all your templates. # It can be anything, data, functions, modules, etc. GLOBAL_CONTEXT", "source tracking. FEED_LINKS_APPEND_QUERY = False # A HTML fragment describing the license, for", "# Default is {} (no config at all) # MARKDOWN_EXTENSION_CONFIGS = {} #", "a forward slash or a backslash (i.e. '\\//\\\\' is a path specifying the", "# A simple copyright tag for inclusion in RSS feeds that works just", "What other languages do you have? # The format is {\"translationcode\" : \"path/to/translation\"", "# 2 = using a string like “2 days ago” (JS, using Luxon)", "# done in the code, hope you don't mind ;-) # Note: most", "be located # default: 'cache' # CACHE_FOLDER = 'cache' # ############################################################################# # Image", "DEFAULT_LANG: ( (\"/archive.html\", \"Archive\"), (\"/categories/\", \"Tags\"), (\"/rss.xml\", \"RSS feed\"), ), } # Alternative", "############################################################################# # Use a thumbnail (defined by \".. previewimage:\" in the gallery's index)", "# USE_BUNDLES = True USE_BUNDLES = False # Plugins you don't want to", "['mathjax'] # If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in", "path, it will be considered as relative # to the location of conf.py", "systems. 
The following comment systems are supported by Nikola: # disqus, facebook, intensedebate,", "the PAGE_INDEX # will not be generated for that directory. # PAGE_INDEX =", "PAGE_INDEX # will not be generated for that directory. # PAGE_INDEX = False", "# processEscapes: true # }, # displayAlign: 'center', // Change this to 'left'", "'format': 'yyyy-MM-dd HH:mm'}, # } # Date fanciness. # # 0 = using", "The list is used to exclude resources from # /robots.txt and /sitemap.xml, and", "(translatable) # This is a dict. The keys are languages, and values are", "pages will still be generated. HIDDEN_AUTHORS = ['Guest'] # Optional HTML that displayed", "site should be located # If you don't use an absolute path, it", "you want in the pages HEAD tag. This will be added right #", "color. In default themes, it might be displayed by some browsers as #", "Other themes might also use it # as an accent color (the default", "you can mark it as hidden. # The tag will not be displayed", "have one # None: show a grey square # '/url/to/file': show the image", "# 'Read more...' for the feeds, if FEED_TEASERS is True (translatable) FEED_READ_MORE_LINK =", "# styles: {'.MathJax_Display': {\"margin\": 0}} # } # }); # </script> # \"\"\"", "Rule: https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE = '' # If True, use the scheduling rule", "into the output. # The format is a dictionary of {source: relative destination}.", "Plugins you don't want to use. Be careful :-) # DISABLED_PLUGINS = [\"render_galleries\"]", "= \"https://example.com/\" BLOG_EMAIL = \"<EMAIL>\" BLOG_DESCRIPTION = \"This is a demo site for", "(which was the default for a # long time). Insert anything you want", "tags in HTML generated by certain compilers (reST/Markdown) # will be demoted by", "to recognize them. \"php\": ['.php'], # Pandoc detects the input from the source", "deploy`. 
If no arguments are specified, a preset # named `default` will be", "to output # contains only the name of the leaf category and not", "``.thumbnail`` added before the file extension by default, # but a different naming", "that are used by the templates # ############################################################################# # 'Read more...' for the", "be generated for that directory. # PAGE_INDEX = False # Enable comments on", "use this config: # KATEX_AUTO_RENDER = \"\"\" # delimiters: [ # {left: \"$$\",", "display post dates. (translatable) # Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time # You", "which is a test account for Disqus. More information # is in the", "OUTPUT_FOLDER = 'output' # where the \"cache\" of partial generated content should be", "FILES_FOLDERS = {'files': ''} # Which means copy 'files' into 'output' # One", "yourself. # # The default compiler for `new_post` is the first entry in", "and RSS feeds. Advanced # option used for traffic source tracking. FEED_LINKS_APPEND_QUERY =", "the code, hope you don't mind ;-) # Note: most Nikola-specific extensions are", "the bottom of <body> # in the default template (base.tmpl). # (translatable) #", "and CSS into single files to make site loading faster in a HTTP/1.1", "'markdown.extensions.extra', 'markdown.extensions.toc'] # Options to be passed to markdown extensions (See https://python-markdown.github.io/reference/) #", "in <h1> tags too, for # example. # (defaults to 1.) # DEMOTE_HEADERS", "keys are languages, and values are tuples. NAVIGATION_LINKS = { DEFAULT_LANG: ( (\"/archive.html\",", "manually to be visible on the site # (the thumbnail has ``.thumbnail`` added", "entry in the POSTS tuple. # # 'rest' is reStructuredText # 'markdown' is", "And you also need to add your COMMENT_SYSTEM_ID which # depends on what", "also get gist, nikola and podcast because those are # done in the", "pages? 
# Setting it to False implies SHOW_SOURCELINK = False COPY_SOURCES = False", "like? TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' # Links for the sidebar / navigation bar. (translatable)", "'.markdown'], \"textile\": ['.textile'], \"txt2tags\": ['.t2t'], \"bbcode\": ['.bb'], \"wiki\": ['.wiki'], \"ipynb\": ['.ipynb'], \"html\": ['.html',", "# Use a thumbnail (defined by \".. previewimage:\" in the gallery's index) in", "Where the output site should be located # If you don't use an", "the sidebar, If empty, the sidebar is not displayed. 'sidebar': '' } }", "is: # LISTINGS_FOLDERS = {'listings': 'listings'} # Which means process listings from 'listings'", "# COMMENTS_IN_PAGES = False # Enable comments on picture gallery pages? # COMMENTS_IN_GALLERIES", "MATHJAX_CONFIG = \"\"\" # <script type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({ # tex2jax: { # inlineMath:", "used for a language. # For example, to use British instead of US", "Nikola Site\" # (translatable) # This is the main URL for your site.", "liking. !! # # Data about this site BLOG_AUTHOR = \"<NAME>\" # (translatable)", "= False # Add the absolute paths to directories containing plugins to use", "IIS, index.php # INDEX_FILE = \"index.html\" # If a link ends in /index.html,", "You may also want to use github_deploy (see below). # You can define", "which # depends on what comment system you use. The default is #", "True. # USE_BUNDLES = True USE_BUNDLES = False # Plugins you don't want", "# Optional HTML that displayed on “main” blog index.html files. # May be", "filters, along with the defaults. # Consult your engine's documentation on filters if", "are supported by Nikola: # disqus, facebook, intensedebate, isso, muut, commento, utterances #", "= '<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' # Append a URL query to the FEED_READ_MORE_LINK in", "\"/category/*.html\"] # Instead of putting files in <slug>.html, put them in <slug>/index.html. #", "a HEX value. 
THEME_COLOR = '#5670d4' # Theme configuration. Fully theme-dependent. (translatable) #", "# And you also need to add your COMMENT_SYSTEM_ID which # depends on", "defining # those. # TEMPLATE_FILTERS = {} # Put in global_context things you", "'draft', 'mathjax' # and 'private' tags are found in a post. Useful for", "the whole path. CATEGORY_OUTPUT_FLAT_HIERARCHY = False # If you do not want to", "to index.html # Common other alternatives: default.html for IIS, index.php # INDEX_FILE =", "right: \"\\\\\\\\]\", display: true}, # {left: \"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\", display: true}, # {left:", "displays them only on desktop. 'featured_large_image_on_mobile': True, # Strip HTML from featured post", "template) tuples. # (translatable) # POSTS = ( (\"posts/*.rst\", \"posts\", \"post.tmpl\"), (\"posts/*.md\", \"posts\",", "backslash (i.e. '\\//\\\\' is a path specifying the # subcategory called '\\' of", "REDIRECTIONS = [] # Presets of commands to execute to deploy. Can be", "`featured_large` on mobile. # `featured_small` displays them only on desktop. 'featured_large_image_on_mobile': True, #", "files in <slug>.html, put them in <slug>/index.html. # No web server configuration is", "do not want to display an author publicly, you can mark it as", "the absolute paths to directories containing plugins to use them. # For example,", "indexes plugin. # Use with care. # DISABLE_INDEXES = False # DISABLE_MAIN_ATOM_FEED =", "robotic spidering. * is supported. Will only be effective # if SITE_URL points", "buttons --> # <div id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style\"> # <a class=\"addthis_button_more\">Share</a>", "Nikola.\" # (translatable) # What is the default language? 
DEFAULT_LANG = \"en\" #", "# GLOBAL_CONTEXT as parameter when the template is about to be # rendered", "href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' # Append a URL query to the FEED_READ_MORE_LINK in Atom and", "don't use an absolute path, it will be considered as relative # to", "asked to be excluded # from indexing and other robotic spidering. * is", "# # Data about this site BLOG_AUTHOR = \"<NAME>\" # (translatable) BLOG_TITLE =", "what comment system you use. The default is # \"nikolademo\" which is a", "added here. # Defaults are markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta is required for Markdown metadata.", "# Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE # options,", "# Templates will use those filters, along with the defaults. # Consult your", "# the path will be used as a prefix for the generated pages", "(\"pages/*.html\", \"\", \"page.tmpl\"), ) # Below this point, everything is optional # Post's", "you wish to push to, using github_deploy. GITHUB_REMOTE_NAME = 'origin' # Whether or", "a HTTP/1.1 # environment but is not recommended for HTTP/2.0 when caching is", "Chrome on Android). Other themes might also use it # as an accent", "don’t). Must be a HEX value. THEME_COLOR = '#5670d4' # Theme configuration. Fully", "on the author list page and posts. # Tag pages will still be", "to be excluded # from indexing and other robotic spidering. * is supported.", "required for Markdown metadata. 
MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc'] # Options to", "CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time # You can also use 'full', 'long', 'medium', or 'short'", "(translatable) # Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting # Example for presets: {'preset': True, 'format':", "\"YAML\" # If you do not want to display a tag publicly, you", "for a language. # For example, to use British instead of US English:", "template can be configured with IMAGE_THUMBNAIL_FORMAT). IMAGE_FOLDERS = {'images': 'images'} # IMAGE_THUMBNAIL_SIZE =", "() } # Name of the theme to use. #THEME = \"bootblog4\" THEME", "= \"\"\" # <a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img alt=\"Creative Commons License BY-NC-SA\" #", "© {date} {author} {license}' RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS # To use comments, you can", "like regular tags. USE_TAG_METADATA = False # If set to True, a warning", "'' } } # POSTS and PAGES contains (wildcard, destination, template) tuples. #", "SCHEDULE_RULE = '' # If True, use the scheduling rule to all posts", "'Read more...' for the index page, if INDEX_TEASERS is True (translatable) INDEX_READ_MORE_LINK =", "# } # github_deploy configuration # For more details, read the manual: #", "not want to display a tag publicly, you can mark it as hidden.", "# github_deploy configuration # For more details, read the manual: # https://getnikola.com/handbook.html#deploying-to-github #", "# <li><a class=\"addthis_button_linkedin\"></a> # <li><a class=\"addthis_button_twitter\"></a> # </ul> # </div> # <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script>", "deploy` command as you like. # DEPLOY_COMMANDS = { # 'default': [ #", "\".. 
previewimage:\" in the gallery's index) in # list of galleries for each", "for Nikola.\" # (translatable) # What is the default language? DEFAULT_LANG = \"en\"", "The category will not be displayed on the category list page. # Category", "of these, just set to [] REDIRECTIONS = [] # Presets of commands", "that will be passed to CONTENT_FOOTER.format(). This is done CONTENT_FOOTER_FORMATS = { DEFAULT_LANG:", "in the manual. COMMENT_SYSTEM_ID = \"\" # Create index.html for page folders? #", "for a # long time). Insert anything you want here, or even make", "ATOM_FILENAME_BASE = \"feed\" # Extension for Atom feed files # ATOM_EXTENSION = \".atom\"", "clone of the Nikola themes # repository. # EXTRA_THEMES_DIRS = [] # List", "current page/post (default) # full_path: a URL with the full path from the", "(translatable) LICENSE = \"\" # I recommend using the Creative Commons' wizard: #", "Post per Index Page # Defaults to 10 # INDEX_DISPLAY_POST_COUNT = 10 #", "# INDEX_FILE = \"index.html\" # If a link ends in /index.html, drop the", "will not be displayed on the author list page and posts. # Tag", "process listings from 'listings' into 'output/listings' # A mapping of languages to file-extensions", "use of 'full_path' # URL_TYPE = 'full_path' # Extension for RSS feed files", "If you don't use an absolute path, it will be considered as relative", "LICENSE = \"\" # I recommend using the Creative Commons' wizard: # https://creativecommons.org/choose/", "want support for the $.$ syntax (which may conflict with running # text!),", "# MARKDOWN_EXTENSION_CONFIGS = {} # Social buttons. This is sample code for AddThis", "'mathjax' and 'private' have special # meaning. If set to False, these tags", "root # absolute: a complete URL (that includes the SITE_URL) # URL_TYPE =", "a dict. The keys are languages, and values are tuples. 
NAVIGATION_LINKS = {", "# ############################################################################# # HTML fragments and diverse things that are used by the", "\"es\": \"./es\", } # What will translated input files be named like? TRANSLATIONS_PATTERN", "publicly, you can mark it as hidden. # The author will not be", "should commit to the source branch automatically # before deploying. GITHUB_COMMIT_SOURCE = True", "<li><a class=\"addthis_button_twitter\"></a> # </ul> # </div> # <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!-- End of", "/foo/default.html => /foo) STRIP_INDEXES = False # List of files relative to the", "href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}' # Things that will be passed to CONTENT_FOOTER.format(). This is", "HTML generated by certain compilers (reST/Markdown) # will be demoted by that much", "rel_path: a relative URL to the current page/post (default) # full_path: a URL", "on pages (i.e. not posts)? # COMMENTS_IN_PAGES = False # Enable comments on", "away instead of scheduling them. # Defaults to False. # FUTURE_IS_NOW = False", "<!-- End of social buttons --> # \"\"\" # Show link to source", "migration was successful. WARN_ABOUT_TAG_METADATA = False # Templates will use those filters, along", "for indexes and galleries. # (translatable) # RSS_FILENAME_BASE = \"rss\" # Atom filename", "to disable only parts of the indexes plugin. # Use with care. #", "\"\", \"page.tmpl\"), ) # Below this point, everything is optional # Post's dates", "index.html files. # May be used for a greeting. (translatable) FRONT_INDEX_HEADER = {", "to 10 # INDEX_DISPLAY_POST_COUNT = 10 # Extra things you want in the", "anything, for # example, you may use rsync: # \"rsync -rav --delete output/", "functions, modules, etc. GLOBAL_CONTEXT = {} # Add functions here and they will", "True, categories can be organized in # hierarchies. 
For a post, the whole", "as it would conflict # with many of the others. # \"pandoc\": ['.rst',", "\"license\": LICENSE } ) } # A simple copyright tag for inclusion in", "= 10 # Extra things you want in the pages HEAD tag. This", "Put in global_context things you want available on all your templates. # It", "Show the first (remaining) two featured posts in small boxes. 'featured_small': False, #", "feeds. Advanced # option used for traffic source tracking. FEED_LINKS_APPEND_QUERY = False #", "care. # DISABLE_INDEXES = False # DISABLE_MAIN_ATOM_FEED = False # DISABLE_MAIN_RSS_FEED = False", "to file-extensions that represent that language. # Feel free to add or delete", "THEME_COLOR = '#5670d4' # Theme configuration. Fully theme-dependent. (translatable) # Samples for bootblog4", "For example, the `v7` directory of your clone of the Nikola themes #", "DISABLE_MAIN_RSS_FEED = False # Add the absolute paths to directories containing plugins to", "\"bootblog4\" THEME = \"disimplex\" # A theme color. In default themes, it might", "full path from the root # absolute: a complete URL (that includes the", "$.$ syntax (which may conflict with running # text!), just use this config:", "documentation on filters if you need help defining # those. # TEMPLATE_FILTERS =", "COMMENTS_IN_PAGES = False # Enable comments on picture gallery pages? # COMMENTS_IN_GALLERIES =", "# # 'rest' is reStructuredText # 'markdown' is Markdown # 'html' assumes the", "not want to display a category publicly, you can mark it as hidden.", "root (!) that will be asked to be excluded # from indexing and", "thumbnail (defined by \".. previewimage:\" in the gallery's index) in # list of", "be displayed by some browsers as # the browser UI color (eg. Chrome", "# DEPLOY_FUTURE = False # If False, draft posts will not be deployed", "more folders containing code listings to be processed and published on # the", "to 1.) 
# DEMOTE_HEADERS = 1 # If set to True, the tags", "side includes\" / partials # REQUIRES the use of 'full_path' # URL_TYPE =", "right-side of navbar, Bootblog 4: right side of title) NAVIGATION_ALT_LINKS = { DEFAULT_LANG:", "What is the default language? DEFAULT_LANG = \"en\" # What other languages do", "the leaf category and not the whole path. CATEGORY_OUTPUT_FLAT_HIERARCHY = False # If", "\"post.tmpl\"), ) PAGES = ( (\"pages/*.rst\", \"\", \"page.tmpl\"), (\"pages/*.md\", \"\", \"page.tmpl\"), (\"pages/*.txt\", \"\",", "are specified, a preset # named `default` will be executed. You can use", "comment system you use. The default is # \"nikolademo\" which is a test", "details, read the manual: # https://getnikola.com/handbook.html#deploying-to-github # You will need to configure the", "containing code listings to be processed and published on # the site. The", "# This is the URL where Nikola's output will be deployed. # If", "query to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced # option used", "want here, or even make it empty (which is # the default right", "CATEGORY_OUTPUT_FLAT_HIERARCHY = False # If you do not want to display a category", "by that much (1 → h1 will become h2 and so on) #", "deployed. # If not set, defaults to SITE_URL # BASE_URL = \"https://example.com/\" BLOG_EMAIL", "write the interface for it yourself. # # The default compiler for `new_post`", "branch automatically # before deploying. GITHUB_COMMIT_SOURCE = True # Where the output site", "# meaning. If set to False, these tags are handled like regular tags.", "List of files relative to the server root (!) that will be asked", "only be effective # if SITE_URL points to server root. 
The list is", "Image to use as thumbnail for those galleries that don't have one #", "by default, # but a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT).", "= {'images': 'images'} # IMAGE_THUMBNAIL_SIZE = 400 # IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}' # #############################################################################", "to CONTENT_FOOTER.format(). This is done CONTENT_FOOTER_FORMATS = { DEFAULT_LANG: ( (), { \"email\":", "\"rss\" # Atom filename base (without extension); used for indexes. # (translatable) ATOM_FILENAME_BASE", "the author list page and posts. # Tag pages will still be generated.", "'.htm'], # PHP files are rendered the usual way (i.e. with the full", "false}, # {left: \"\\\\\\\\(\", right: \"\\\\\\\\)\", display: false} # ] # \"\"\" #", "to exclude resources from # /robots.txt and /sitemap.xml, and to inform search engines", "on all your templates. # It can be anything, data, functions, modules, etc.", "# If you don't need any of these, just set to [] REDIRECTIONS", "disqus, facebook, intensedebate, isso, muut, commento, utterances # You can leave this option", "be named like? TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' # Links for the sidebar / navigation", "# Your theme must support it, Bootstrap already does. # DATE_FANCINESS = 0", "in small boxes. 'featured_small': False, # Show featured posts on mobile. 'featured_on_mobile': True,", "# disqus, facebook, intensedebate, isso, muut, commento, utterances # You can leave this", "Site\" # (translatable) # This is the main URL for your site. It", "\"My Nikola Site\" # (translatable) # This is the main URL for your", "# EXTRA_PLUGINS_DIRS = [] # Add the absolute paths to directories containing themes", "class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style\"> # <a class=\"addthis_button_more\">Share</a> # <ul><li><a class=\"addthis_button_facebook\"></a> # <li><a", "them in <slug>/index.html. 
# No web server configuration is required. Also enables STRIP_INDEXES.", "extensions to enable? # You will also get gist, nikola and podcast because", "('\\') to escape # a forward slash or a backslash (i.e. '\\//\\\\' is", "in the code, hope you don't mind ;-) # Note: most Nikola-specific extensions", "{ DEFAULT_LANG: { # Show the latest featured post in a large box,", "= 1 # If set to True, the tags 'draft', 'mathjax' and 'private'", "footer (in HTML). # (translatable) CONTENT_FOOTER = 'Contents &copy; {date} <a href=\"mailto:{email}\">{author}</a> -", "to False. # FUTURE_IS_NOW = False # If True, future dated posts are", "relative to the server root (!) that will be asked to be excluded", "do you have? # The format is {\"translationcode\" : \"path/to/translation\" } # the", "# named `default` will be executed. You can use as many presets #", "ENABLE_AUTHOR_PAGES = False # If you do not want to display an author", "# Whether or not github_deploy should commit to the source branch automatically #", "PAGE_INDEX = False # Enable comments on pages (i.e. not posts)? # COMMENTS_IN_PAGES", "Contents of the sidebar, If empty, the sidebar is not displayed. 'sidebar': ''", "mark it as hidden. # The author will not be displayed on the", "and TIMEZONE (without JS) # 1 = using LUXON_DATE_FORMAT and local user time", "Tag pages will still be generated. HIDDEN_AUTHORS = ['Guest'] # Optional HTML that", "multiple presets and specify them as arguments # to `nikola deploy`. If no", "supported. Will only be effective # if SITE_URL points to server root. The", "EXTRA_THEMES_DIRS = [] # List of regular expressions, links matching them will always", "\"en\" # What other languages do you have? # The format is {\"translationcode\"", "disabled on a per-page/post basis by adding # .. 
pretty_url: False # to", "copy 'files' into 'output' # One or more folders containing code listings to", "False # Image to use as thumbnail for those galleries that don't have", "# Category pages will still be generated. HIDDEN_CATEGORIES = [] # If ENABLE_AUTHOR_PAGES", "visible on the site # (the thumbnail has ``.thumbnail`` added before the file", "4: right side of title) NAVIGATION_ALT_LINKS = { DEFAULT_LANG: () } # Name", "= ( (\"posts/*.rst\", \"posts\", \"post.tmpl\"), (\"posts/*.md\", \"posts\", \"post.tmpl\"), (\"posts/*.txt\", \"posts\", \"post.tmpl\"), (\"posts/*.html\", \"posts\",", "{left: \"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\", display: true}, # {left: \"$\", right: \"$\", display: false},", "server configuration is required. Also enables STRIP_INDEXES. # This can be disabled on", "previewimage:\" in the gallery's index) in # list of galleries for each gallery", "# DISABLED_PLUGINS = [\"render_galleries\"] # Special settings to disable only parts of the", "\"\\\\\\\\end{equation*}\", display: true}, # {left: \"$\", right: \"$\", display: false}, # {left: \"\\\\\\\\(\",", "right: \"$$\", display: true}, # {left: \"\\\\\\\\[\", right: \"\\\\\\\\]\", display: true}, # {left:", "the locale/region used for a language. # For example, to use British instead", "HIDDEN_AUTHORS = ['Guest'] # Optional HTML that displayed on “main” blog index.html files.", "of the leaf category and not the whole path. CATEGORY_OUTPUT_FLAT_HIERARCHY = False #", "languages do you have? # The format is {\"translationcode\" : \"path/to/translation\" } #", "DISABLE_INDEXES = False # DISABLE_MAIN_ATOM_FEED = False # DISABLE_MAIN_RSS_FEED = False # Add", "settings. If you want support for the $.$ syntax (which may # conflict", "fanciness. # # 0 = using DATE_FORMAT and TIMEZONE (without JS) # 1", "$.$ syntax (which may # conflict with running text!), just use this config:", "# environment but is not recommended for HTTP/2.0 when caching is used. 
#", "= False # Do you want to add a Mathjax config file? #", "HH:mm'}, # } # Date fanciness. # # 0 = using DATE_FORMAT and", "# 'default': [ # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\", # ] # }", "posts right away instead of scheduling them. # Defaults to False. # FUTURE_IS_NOW", "scheduling them. # Defaults to False. # FUTURE_IS_NOW = False # If True,", "Use a thumbnail (defined by \".. previewimage:\" in the gallery's index) in #", "# Below this point, everything is optional # Post's dates are considered in", "format is {\"translationcode\" : \"path/to/translation\" } # the path will be used as", "\"index.html\" # If a link ends in /index.html, drop the index.html part. #", "themes may not always support them. (translatable) # (Bootstrap 4: right-side of navbar,", "is # the default right now) # (translatable) # SOCIAL_BUTTONS_CODE = \"\"\" #", "Which means copy 'files' into 'output' # One or more folders containing code", "galleries. # (translatable) # RSS_FILENAME_BASE = \"rss\" # Atom filename base (without extension);", "file will be created in output/foo/from.html that redirects # to the \"/bar/to.html\" URL.", "file-extensions that represent that language. # Feel free to add or delete extensions", "named `default` will be executed. You can use as many presets # in", "disable only parts of the indexes plugin. # Use with care. # DISABLE_INDEXES", "RSS filename base (without extension); used for indexes and galleries. # (translatable) #", "them. (translatable) # (Bootstrap 4: right-side of navbar, Bootblog 4: right side of", "'listings'} # Which means process listings from 'listings' into 'output/listings' # A mapping", "in <slug>/index.html. # No web server configuration is required. Also enables STRIP_INDEXES. #", "\"\" # I recommend using the Creative Commons' wizard: # https://creativecommons.org/choose/ # LICENSE", "# displayMath: [ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ], # processEscapes: true # }, # displayAlign:", "to use. 
Be careful :-) # DISABLED_PLUGINS = [\"render_galleries\"] # Special settings to", "of the Markdown and reST compilers in the # past. Useful especially if", "Also enables STRIP_INDEXES. # This can be disabled on a per-page/post basis by", "utf-8 -*- import time # !! This is the configuration of Nikola. !!", "Commons' wizard: # https://creativecommons.org/choose/ # LICENSE = \"\"\" # <a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> #", "for page folders? # WARNING: if a page would conflict with the index", "Show image in `featured_large` on mobile. # `featured_small` displays them only on desktop.", "generated. HIDDEN_TAGS = ['mathjax'] # If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can", "(i.e. '\\//\\\\' is a path specifying the # subcategory called '\\' of the", "# (Uses the INDEX_FILE setting, so if that is, say, default.html, # it", "it, Bootstrap already does. # DATE_FANCINESS = 0 # Customize the locale/region used", "to the server root (!) that will be asked to be excluded #", "# (translatable) BLOG_TITLE = \"My Nikola Site\" # (translatable) # This is the", "Nikola-specific extensions are done via the Nikola plugin system, # with the MarkdownExtension", "\"page.tmpl\"), (\"pages/*.md\", \"\", \"page.tmpl\"), (\"pages/*.txt\", \"\", \"page.tmpl\"), (\"pages/*.html\", \"\", \"page.tmpl\"), ) # Below", "put them in <slug>/index.html. # No web server configuration is required. Also enables", "the same way NAVIGATION_LINKS does, # although themes may not always support them.", "# Example for another language: # \"es\": \"./es\", } # What will translated", "to any list, but don't add any new # compilers unless you write", "other alternatives: default.html for IIS, index.php # INDEX_FILE = \"index.html\" # If a", "Optional HTML that displayed on “main” blog index.html files. # May be used", "more folders containing files to be copied as-is into the output. # The", "Your theme must support it, Bootstrap already does. 
# DATE_FANCINESS = 0 #", "(no config at all) # MARKDOWN_EXTENSION_CONFIGS = {} # Social buttons. This is", "output/foo/from.html that redirects # to the \"/bar/to.html\" URL. notice that the \"from\" side", "addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style\"> # <a class=\"addthis_button_more\">Share</a> # <ul><li><a class=\"addthis_button_facebook\"></a> # <li><a class=\"addthis_button_google_plusone_share\"></a>", "# SCHEDULE_RULE = '' # If True, use the scheduling rule to all", "'draft', 'mathjax' and 'private' have special # meaning. If set to False, these", "\"$$\", right: \"$$\", display: true}, # {left: \"\\\\\\\\[\", right: \"\\\\\\\\]\", display: true}, #", "absolute path, it will be considered as relative # to the location of", "absolute paths to directories containing plugins to use them. # For example, the", "path from the root # absolute: a complete URL (that includes the SITE_URL)", "galleries that don't have one # None: show a grey square # '/url/to/file':", "`v7` directory of your clone of the Nikola themes # repository. # EXTRA_THEMES_DIRS", "should edit it to your liking. !! # # Data about this site", "fragment describing the license, for the sidebar. # (translatable) LICENSE = \"\" #", "('/') to separate paths. Use a backslash ('\\') to escape # a forward", "\"\", \"page.tmpl\"), (\"pages/*.txt\", \"\", \"page.tmpl\"), (\"pages/*.html\", \"\", \"page.tmpl\"), ) # Below this point,", "# }); # </script> # \"\"\" # Want to use KaTeX instead of", "for a greeting. (translatable) FRONT_INDEX_HEADER = { DEFAULT_LANG: '' } # URLs to", "= True # Allows scheduling of posts using the rule specified here (new_post", "page and posts. # Tag pages will still be generated. HIDDEN_TAGS = ['mathjax']", "with many of the others. # \"pandoc\": ['.rst', '.md', '.txt'], } # Preferred", "you want support for the $.$ syntax (which may # conflict with running", "used. 
(translatable) # Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting # Example for presets: {'preset': True,", "List of regular expressions, links matching them will always be considered # valid", "possible to run # them without reconfiguring your server to recognize them. \"php\":", "or more folders containing code listings to be processed and published on #", "this to 'left' if you want left-aligned equations. # \"HTML-CSS\": { # styles:", "post titles are in <h1> tags too, for # example. # (defaults to", "on a per-page/post basis by adding # .. pretty_url: False # to the", "'full', 'long', 'medium', or 'short' # DATE_FORMAT = 'yyyy-MM-dd HH:mm' # Date format", "exclude resources from # /robots.txt and /sitemap.xml, and to inform search engines about", "= '<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' # 'Read more...' for the feeds, if FEED_TEASERS is", "help defining # those. # TEMPLATE_FILTERS = {} # Put in global_context things", "the template is about to be # rendered GLOBAL_CONTEXT_FILLER = [] # Settings", "\"\", \"page.tmpl\"), (\"pages/*.md\", \"\", \"page.tmpl\"), (\"pages/*.txt\", \"\", \"page.tmpl\"), (\"pages/*.html\", \"\", \"page.tmpl\"), ) #", "on GitHub. GITHUB_SOURCE_BRANCH = 'src' GITHUB_DEPLOY_BRANCH = 'master' # The name of the", "['.ipynb'], \"html\": ['.html', '.htm'], # PHP files are rendered the usual way (i.e.", "ping` from the `ping` # plugin (`nikola plugin -i ping`). Or run `nikola", "be referenced manually to be visible on the site # (the thumbnail has", "<hN> tags in HTML generated by certain compilers (reST/Markdown) # will be demoted", "navigation bar. (translatable) # This is a dict. 
The keys are languages, and", "'output' # One or more folders containing code listings to be processed and", "wrapped in \"---\" METADATA_FORMAT = \"YAML\" # If you do not want to", "for # example, you may use rsync: # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\"", "If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in # hierarchies.", "are published/deployed; not in indexes/sitemap # Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to", "# the browser UI color (eg. Chrome on Android). Other themes might also", "system you use. The default is # \"nikolademo\" which is a test account", "also want to use github_deploy (see below). # You can define multiple presets", "(not pages!) by default # SCHEDULE_ALL = False # Do you want to", "may not support every # feature yet, it's faster and the output looks", "# is in the manual. COMMENT_SYSTEM_ID = \"\" # Create index.html for page", "an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE = '' # If True, use", "languages to file-extensions that represent that language. # Feel free to add or", "# Preferred metadata format for new posts # \"YAML\": YAML wrapped in \"---\"", "can choose between different third party comment # systems. The following comment systems", "everything is optional # Post's dates are considered in UTC by default, if", "using DATE_FORMAT and TIMEZONE (without JS) # 1 = using LUXON_DATE_FORMAT and local", "path specifying the # subcategory called '\\' of the top-level category called '/').", "by setting slug to `index`), the PAGE_INDEX # will not be generated for", "\"https://example.com/\" # This is the URL where Nikola's output will be deployed. #", "displayed on “main” blog index.html files. # May be used for a greeting.", "--> # \"\"\" # Show link to source for the posts? SHOW_SOURCELINK =", "metadata. 
PRETTY_URLS = False # If True, publish future dated posts right away", "tag for inclusion in RSS feeds that works just # like CONTENT_FOOTER and", "specified, # using a forward slash ('/') to separate paths. Use a backslash", "to `index`), the PAGE_INDEX # will not be generated for that directory. #", "gallery pages? # COMMENTS_IN_GALLERIES = False # What file should be used for", "format is a dictionary of {source: relative destination}. # Default is: # FILES_FOLDERS", "'.md', '.txt'], } # Preferred metadata format for new posts # \"YAML\": YAML", "disabled by default as it would conflict # with many of the others.", "# compilers unless you write the interface for it yourself. # # The", "“2 days ago” (JS, using Luxon) # # Your theme must support it,", "# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable) INDEX_READ_MORE_LINK", "via the Nikola plugin system, # with the MarkdownExtension class and should not", "# to `nikola deploy`. If no arguments are specified, a preset # named", "prominent link. Don't forget the protocol (http/https)! SITE_URL = \"https://example.com/\" # This is", "display: true}, # {left: \"\\\\\\\\[\", right: \"\\\\\\\\]\", display: true}, # {left: \"\\\\\\\\begin{equation*}\", right:", "bottom of <body> # in the default template (base.tmpl). # (translatable) # BODY_END", "string like “2 days ago” (JS, using Luxon) # # Your theme must", "# plugin (`nikola plugin -i ping`). Or run `nikola check -l`. # You", "that directory. # PAGE_INDEX = False # Enable comments on pages (i.e. not", "empty (which is # the default right now) # (translatable) # SOCIAL_BUTTONS_CODE =", "on filters if you need help defining # those. # TEMPLATE_FILTERS = {}", "the name of the leaf category and not the whole path. CATEGORY_OUTPUT_FLAT_HIERARCHY =", "comments. COMMENT_SYSTEM = \"\" # And you also need to add your COMMENT_SYSTEM_ID", "# Add the absolute paths to directories containing themes to use them. 
#", "checking that # migration was successful. WARN_ABOUT_TAG_METADATA = False # Templates will use", "LICENSE = \"\"\" # <a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img alt=\"Creative Commons License BY-NC-SA\"", "# OUTPUT_FOLDER = 'output' # where the \"cache\" of partial generated content should", "just use this config: # MATHJAX_CONFIG = \"\"\" # <script type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({", "you write the interface for it yourself. # # The default compiler for", "presets and specify them as arguments # to `nikola deploy`. If no arguments", "<h1> tags too, for # example. # (defaults to 1.) # DEMOTE_HEADERS =", "GLOBAL_CONTEXT as parameter when the template is about to be # rendered GLOBAL_CONTEXT_FILLER", "coding: utf-8 -*- import time # !! This is the configuration of Nikola.", "pages (i.e. not posts)? # COMMENTS_IN_PAGES = False # Enable comments on picture", "RSS feeds that works just # like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT = 'Contents", "pages location TRANSLATIONS = { DEFAULT_LANG: \"\", # Example for another language: #", "in \"---\" METADATA_FORMAT = \"YAML\" # If you do not want to display", "If ENABLE_AUTHOR_PAGES is set to True and there is more than one #", "file (usually # caused by setting slug to `index`), the PAGE_INDEX # will", "some browsers as # the browser UI color (eg. Chrome on Android). Other", "\"posts\", \"post.tmpl\"), ) PAGES = ( (\"pages/*.rst\", \"\", \"page.tmpl\"), (\"pages/*.md\", \"\", \"page.tmpl\"), (\"pages/*.txt\",", "unless you write the interface for it yourself. # # The default compiler", "# The author will not be displayed on the author list page and", "that our use of \"server side includes\" / partials # REQUIRES the use", "the manual. COMMENT_SYSTEM_ID = \"\" # Create index.html for page folders? # WARNING:", "server root (!) 
that will be asked to be excluded # from indexing", "SCHEDULE_ALL = False # Do you want to add a Mathjax config file?", "\"$$\", display: true}, # {left: \"\\\\\\\\[\", right: \"\\\\\\\\]\", display: true}, # {left: \"\\\\\\\\begin{equation*}\",", "# BODY_END = \"\" # Bundle JS and CSS into single files to", "between different third party comment # systems. The following comment systems are supported", "tags too, for # example. # (defaults to 1.) # DEMOTE_HEADERS = 1", "FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced # option used for traffic source", "['.t2t'], \"bbcode\": ['.bb'], \"wiki\": ['.wiki'], \"ipynb\": ['.ipynb'], \"html\": ['.html', '.htm'], # PHP files", "gallery's index) in # list of galleries for each gallery GALLERIES_USE_THUMBNAIL = False", "site # (the thumbnail has ``.thumbnail`` added before the file extension by default,", "the license, for the sidebar. # (translatable) LICENSE = \"\" # I recommend", "extensions (See https://python-markdown.github.io/reference/) # Default is {} (no config at all) # MARKDOWN_EXTENSION_CONFIGS", "False # If True, future dated posts are allowed in deployed output #", "['.php'], # Pandoc detects the input from the source filename # but is", "TIMEZONE = \"Europe/London\" # Date format used to display post dates. (translatable) #", "where Nikola's output will be deployed. # If not set, defaults to SITE_URL", "(without extension); used for indexes and galleries. # (translatable) # RSS_FILENAME_BASE = \"rss\"", "not posts)? # COMMENTS_IN_PAGES = False # Enable comments on picture gallery pages?", "the metadata. PRETTY_URLS = False # If True, publish future dated posts right", "anything you want here, or even make it empty (which is # the", "the deployment branch on GitHub. GITHUB_SOURCE_BRANCH = 'src' GITHUB_DEPLOY_BRANCH = 'master' # The", "for presets: {'preset': True, 'format': 'DATE_FULL'} # LUXON_DATE_FORMAT = { # DEFAULT_LANG: {'preset':", "!! You should edit it to your liking. !! 
# # Data about", "reST compilers in the # past. Useful especially if your post titles are", "Append a URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced", "default right now) # (translatable) # SOCIAL_BUTTONS_CODE = \"\"\" # <!-- Social buttons", "is used. # Defaults to True. # USE_BUNDLES = True USE_BUNDLES = False", "A HTML fragment describing the license, for the sidebar. # (translatable) LICENSE =", "# hierarchies. For a post, the whole path in the hierarchy must be", "= False COPY_SOURCES = False # Modify the number of Post per Index", "hidden. # The category will not be displayed on the category list page.", "as # the browser UI color (eg. Chrome on Android). Other themes might", "to be the same value. # DEPLOY_FUTURE = False # If False, draft", "base (without extension); used for indexes and galleries. # (translatable) # RSS_FILENAME_BASE =", "RSS_FILENAME_BASE = \"rss\" # Atom filename base (without extension); used for indexes. #", "index.html part. # http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses the INDEX_FILE setting, so if", "you can choose between different third party comment # systems. The following comment", "be generated. HIDDEN_AUTHORS = ['Guest'] # Optional HTML that displayed on “main” blog", "of {source: relative destination}. # Default is: # LISTINGS_FOLDERS = {'listings': 'listings'} #", "output # contains only the name of the leaf category and not the", "the MarkdownExtension class and should not be added here. # Defaults are markdown.extensions.(fenced_code|codehilite|extra)", "the source branch automatically # before deploying. GITHUB_COMMIT_SOURCE = True # Where the", "Page # Defaults to 10 # INDEX_DISPLAY_POST_COUNT = 10 # Extra things you", "you want support for the $.$ syntax (which may conflict with running #", "directories containing plugins to use them. # For example, the `plugins` directory of", "<slug>.html, put them in <slug>/index.html. # No web server configuration is required. 
Also", "read the manual: # https://getnikola.com/handbook.html#deploying-to-github # You will need to configure the deployment", "Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value. # DEPLOY_FUTURE", "points to server root. The list is used to exclude resources from #", "the `v7` directory of your clone of the Nikola themes # repository. #", "# URLs to other posts/pages can take 3 forms: # rel_path: a relative", "the Nikola plugins # repository. # EXTRA_PLUGINS_DIRS = [] # Add the absolute", "or a backslash (i.e. '\\//\\\\' is a path specifying the # subcategory called", "past. Useful especially if your post titles are in <h1> tags too, for", "MUST be a # relative URL. # # If you don't need any", "to the current page/post (default) # full_path: a URL with the full path", "\"disimplex\" # A theme color. In default themes, it might be displayed by", "bootstrap4 supports: navbar_light (defaults to False) # navbar_custom_bg (defaults to '') # Config", "in HTML generated by certain compilers (reST/Markdown) # will be demoted by that", "to enable? # You will also get gist, nikola and podcast because those", "False, # Show the first (remaining) two featured posts in small boxes. 'featured_small':", "False, # Contents of the sidebar, If empty, the sidebar is not displayed.", "English: LOCALES = {'en': 'en_GB'} # LOCALES = {} # One or more", "Social buttons --> # <div id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style\"> # <a", "previewimage as its background. 'featured_large': False, # Show the first (remaining) two featured", "= '#5670d4' # Theme configuration. Fully theme-dependent. (translatable) # Samples for bootblog4 (enabled)", "(remaining) two featured posts in small boxes. 
'featured_small': False, # Show featured posts", "# The default compiler for `new_post` is the first entry in the POSTS", "href=\"mailto:{email}\">{author}</a> - Powered by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}' # Things that will be", "say, default.html, # it will instead /foo/default.html => /foo) STRIP_INDEXES = False #", "COPY_SOURCES = False # Modify the number of Post per Index Page #", "of conf.py # OUTPUT_FOLDER = 'output' # where the \"cache\" of partial generated", "= False # Copy the source files for your pages? # Setting it", "can define multiple presets and specify them as arguments # to `nikola deploy`.", "on Android). Other themes might also use it # as an accent color", "(without JS) # 1 = using LUXON_DATE_FORMAT and local user time (JS, using", "(defaults to False) # navbar_custom_bg (defaults to '') # Config for bootblog4: THEME_CONFIG", "no arguments are specified, a preset # named `default` will be executed. You", "filters if you need help defining # those. # TEMPLATE_FILTERS = {} #", "in UTC by default, if you want to use # another time zone,", "# depends on what comment system you use. The default is # \"nikolademo\"", "</head> # (translatable) # EXTRA_HEAD_DATA = \"\" # Google Analytics or whatever else", "information # is in the manual. COMMENT_SYSTEM_ID = \"\" # Create index.html for", "set to True, categories can be organized in # hierarchies. For a post,", "small copyright notice for the page footer (in HTML). # (translatable) CONTENT_FOOTER =", "may use rsync: # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\" # And then do", "it will be considered as relative # to the location of conf.py #", "One or more folders containing files to be copied as-is into the output.", "COMMENT_SYSTEM = \"\" # And you also need to add your COMMENT_SYSTEM_ID which", "'mathjax' # and 'private' tags are found in a post. Useful for checking", "[] REDIRECTIONS = [] # Presets of commands to execute to deploy. 
Can", "CONTENT_FOOTER_FORMATS # To use comments, you can choose between different third party comment", "\"post.tmpl\"), (\"posts/*.html\", \"posts\", \"post.tmpl\"), ) PAGES = ( (\"pages/*.rst\", \"\", \"page.tmpl\"), (\"pages/*.md\", \"\",", "# Where the output site should be located # If you don't use", "category will not be displayed on the category list page. # Category pages", "Index Page # Defaults to 10 # INDEX_DISPLAY_POST_COUNT = 10 # Extra things", "on the tag list page and posts. # Tag pages will still be", "FEED_TEASERS is True (translatable) FEED_READ_MORE_LINK = '<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' # Append a URL", "site BLOG_AUTHOR = \"<NAME>\" # (translatable) BLOG_TITLE = \"My Nikola Site\" # (translatable)", "'{path}.{lang}.{ext}' # Links for the sidebar / navigation bar. (translatable) # This is", "What will translated input files be named like? TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' # Links", "'' } # URLs to other posts/pages can take 3 forms: # rel_path:", "directory of your clone of the Nikola themes # repository. # EXTRA_THEMES_DIRS =", "set to True, the tags 'draft', 'mathjax' and 'private' have special # meaning.", "files be named like? TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' # Links for the sidebar /", "not github_deploy should commit to the source branch automatically # before deploying. GITHUB_COMMIT_SOURCE", "\"php\": ['.php'], # Pandoc detects the input from the source filename # but", "filename base (without extension); used for indexes. # (translatable) ATOM_FILENAME_BASE = \"feed\" #", "DEPLOY_DRAFTS = True # Allows scheduling of posts using the rule specified here", "BLOG_DESCRIPTION = \"This is a demo site for Nikola.\" # (translatable) # What", "the remote where you wish to push to, using github_deploy. 
GITHUB_REMOTE_NAME = 'origin'", "styles: {'.MathJax_Display': {\"margin\": 0}} # } # }); # </script> # \"\"\" #", "conflict with running text!), just use this config: # KATEX_AUTO_RENDER = \"\"\" #", "directory indexes? # Defaults to index.html # Common other alternatives: default.html for IIS,", "{ DEFAULT_LANG: () } # Name of the theme to use. #THEME =", "display a category publicly, you can mark it as hidden. # The category", "\"\"\" # Want to use KaTeX instead of MathJax? While KaTeX may not", "true # }, # displayAlign: 'center', // Change this to 'left' if you", "for directory indexes? # Defaults to index.html # Common other alternatives: default.html for", "match. Check the available # list from Wikipedia: TIMEZONE = \"Europe/London\" # Date", "-s) # Specify an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE = '' #", "= {'en': 'en_GB'} # LOCALES = {} # One or more folders containing", "\"bbcode\": ['.bb'], \"wiki\": ['.wiki'], \"ipynb\": ['.ipynb'], \"html\": ['.html', '.htm'], # PHP files are", "plugins to use them. # For example, the `plugins` directory of your clone", "alternatives: default.html for IIS, index.php # INDEX_FILE = \"index.html\" # If a link", "h2 and so on) # This was a hidden feature of the Markdown", "[] # If ENABLE_AUTHOR_PAGES is set to True and there is more than", "add a Mathjax config file? # MATHJAX_CONFIG = \"\" # If you want", "src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A small copyright notice for the page footer (in HTML). #", "muut, commento, utterances # You can leave this option blank to disable comments.", "\"page.tmpl\"), ) # Below this point, everything is optional # Post's dates are", "slash ('/') to separate paths. Use a backslash ('\\') to escape # a", "theme must support it, Bootstrap already does. # DATE_FANCINESS = 0 # Customize", "dictionary of {source: relative destination}. 
# Default is: # FILES_FOLDERS = {'files': ''}", "\"\", \"page.tmpl\"), (\"pages/*.html\", \"\", \"page.tmpl\"), ) # Below this point, everything is optional", "# Tag pages will still be generated. HIDDEN_AUTHORS = ['Guest'] # Optional HTML", "use of \"server side includes\" / partials # REQUIRES the use of 'full_path'", "For example, the `plugins` directory of your clone of the Nikola plugins #", "the first entry in the POSTS tuple. # # 'rest' is reStructuredText #", "author, author pages are generated. ENABLE_AUTHOR_PAGES = False # If you do not", "# {left: \"$\", right: \"$\", display: false}, # {left: \"\\\\\\\\(\", right: \"\\\\\\\\)\", display:", "# A theme color. In default themes, it might be displayed by some", "# RSS_FILENAME_BASE = \"rss\" # Atom filename base (without extension); used for indexes.", "\"posts\", \"post.tmpl\"), (\"posts/*.md\", \"posts\", \"post.tmpl\"), (\"posts/*.txt\", \"posts\", \"post.tmpl\"), (\"posts/*.html\", \"posts\", \"post.tmpl\"), ) PAGES", "categories can be organized in # hierarchies. For a post, the whole path", "KaTeX may not support every # feature yet, it's faster and the output", "= False # What file should be used for directory indexes? # Defaults", "(which may # conflict with running text!), just use this config: # KATEX_AUTO_RENDER", "by certain compilers (reST/Markdown) # will be demoted by that much (1 →", "to the source branch automatically # before deploying. GITHUB_COMMIT_SOURCE = True # Where", "featured post in a large box, with the previewimage as its background. 'featured_large':", "# Contents of the sidebar, If empty, the sidebar is not displayed. 'sidebar':", "time (JS, using Luxon) # 2 = using a string like “2 days", "= 'master' # The name of the remote where you wish to push", "KATEX_AUTO_RENDER = \"\"\" # delimiters: [ # {left: \"$$\", right: \"$$\", display: true},", "directory of your clone of the Nikola plugins # repository. 
# EXTRA_PLUGINS_DIRS =", "in deployed output # Only the individual posts are published/deployed; not in indexes/sitemap", "# (translatable) # What is the default language? DEFAULT_LANG = \"en\" # What", "page/post (default) # full_path: a URL with the full path from the root", "# ROBOTS_EXCLUSIONS = [\"/archive.html\", \"/category/*.html\"] # Instead of putting files in <slug>.html, put", "them. # Defaults to False. # FUTURE_IS_NOW = False # If True, future", "Tag pages will still be generated. HIDDEN_TAGS = ['mathjax'] # If CATEGORY_ALLOW_HIERARCHIES is", "it to your liking. !! # # Data about this site BLOG_AUTHOR =", "compilers unless you write the interface for it yourself. # # The default", "Disqus. More information # is in the manual. COMMENT_SYSTEM_ID = \"\" # Create", "'/'). CATEGORY_ALLOW_HIERARCHIES = False # If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output", "it's faster and the output looks better. # USE_KATEX = False # KaTeX", "INDEX_FILE setting, so if that is, say, default.html, # it will instead /foo/default.html", "is sample code for AddThis (which was the default for a # long", "forget the protocol (http/https)! SITE_URL = \"https://example.com/\" # This is the URL where", "files to make site loading faster in a HTTP/1.1 # environment but is", "= 'rel_path' # # Note that our use of \"server side includes\" /", "displayed on the author list page and posts. # Tag pages will still", "hidden feature of the Markdown and reST compilers in the # past. Useful", "intensedebate, isso, muut, commento, utterances # You can leave this option blank to", "# Customize the locale/region used for a language. # For example, to use", "GALLERIES_DEFAULT_THUMBNAIL = None # Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and", "Can be anything, for # example, you may use rsync: # \"rsync -rav", "[] # The <hN> tags in HTML generated by certain compilers (reST/Markdown) #", "caching is used. # Defaults to True. 
# USE_BUNDLES = True USE_BUNDLES =", "locale/region used for a language. # For example, to use British instead of", "image in `featured_large` on mobile. # `featured_small` displays them only on desktop. 'featured_large_image_on_mobile':", "# What other languages do you have? # The format is {\"translationcode\" :", "IMAGE_FOLDERS = {'images': 'images'} # IMAGE_THUMBNAIL_SIZE = 400 # IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}' #", "= \"\" # I recommend using the Creative Commons' wizard: # https://creativecommons.org/choose/ #", "!! This is the configuration of Nikola. !! # # !! You should", "COMMENT_SYSTEM_ID = \"\" # Create index.html for page folders? # WARNING: if a", "of posts using the rule specified here (new_post -s) # Specify an iCal", "# Add the absolute paths to directories containing plugins to use them. #", "# If you do not want to display a category publicly, you can", "tuple. # # 'rest' is reStructuredText # 'markdown' is Markdown # 'html' assumes", "to disable comments. COMMENT_SYSTEM = \"\" # And you also need to add", "be the same value. # DEPLOY_FUTURE = False # If False, draft posts", "`nikola check -l`. # You may also want to use github_deploy (see below).", "in # list of galleries for each gallery GALLERIES_USE_THUMBNAIL = False # Image", "CONTENT_FOOTER_FORMATS RSS_COPYRIGHT = 'Contents © {date} <a href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN = 'Contents ©", "directories containing themes to use them. # For example, the `v7` directory of", "# What Markdown extensions to enable? # You will also get gist, nikola", "URL. # # If you don't need any of these, just set to", "it as hidden. # The category will not be displayed on the category", "your engine's documentation on filters if you need help defining # those. #", "], # displayMath: [ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ], # processEscapes: true # }, #", "False # Copy the source files for your pages? 
# Setting it to", "# Atom filename base (without extension); used for indexes. # (translatable) ATOM_FILENAME_BASE =", "= 'Contents © {date} {author} {license}' RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS # To use comments,", "of {source: relative destination}. # Default is: # FILES_FOLDERS = {'files': ''} #", "True, publish future dated posts right away instead of scheduling them. # Defaults", "Check the available # list from Wikipedia: TIMEZONE = \"Europe/London\" # Date format", "`default` will be executed. You can use as many presets # in a", "run # them without reconfiguring your server to recognize them. \"php\": ['.php'], #", "use. #THEME = \"bootblog4\" THEME = \"disimplex\" # A theme color. In default", "Extra things you want in the pages HEAD tag. This will be added", "= [\"/archive.html\", \"/category/*.html\"] # Instead of putting files in <slug>.html, put them in", "only parts of the indexes plugin. # Use with care. # DISABLE_INDEXES =", "href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}' RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS #", "to use them. # For example, the `plugins` directory of your clone of", "\"\", # Example for another language: # \"es\": \"./es\", } # What will", "the Nikola plugin system, # with the MarkdownExtension class and should not be", "is Markdown # 'html' assumes the file is HTML and just copies it", "# No web server configuration is required. Also enables STRIP_INDEXES. # This can", "# bootblog4 supports: featured_large featured_small featured_on_mobile # featured_large_image_on_mobile featured_strip_html sidebar # bootstrap4 supports:", "following comment systems are supported by Nikola: # disqus, facebook, intensedebate, isso, muut,", "(the default ones don’t). Must be a HEX value. THEME_COLOR = '#5670d4' #", "want left-aligned equations. # \"HTML-CSS\": { # styles: {'.MathJax_Display': {\"margin\": 0}} # }", "interface for it yourself. 
# # The default compiler for `new_post` is the", "will need to configure the deployment branch on GitHub. GITHUB_SOURCE_BRANCH = 'src' GITHUB_DEPLOY_BRANCH", "those. # TEMPLATE_FILTERS = {} # Put in global_context things you want available", "False # If True, publish future dated posts right away instead of scheduling", "<body> # in the default template (base.tmpl). # (translatable) # BODY_END = \"\"", "pages HEAD tag. This will be added right # before </head> # (translatable)", "TEMPLATE_FILTERS = {} # Put in global_context things you want available on all", "# If True, use the scheduling rule to all posts (not pages!) by", "IMAGE_THUMBNAIL_SIZE = 400 # IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}' # ############################################################################# # HTML fragments and", "Nikola's output will be deployed. # If not set, defaults to SITE_URL #", "are languages, and values are tuples. NAVIGATION_LINKS = { DEFAULT_LANG: ( (\"/archive.html\", \"Archive\"),", "# If True, future dated posts are allowed in deployed output # Only", "will still be generated. HIDDEN_AUTHORS = ['Guest'] # Optional HTML that displayed on", "slug to `index`), the PAGE_INDEX # will not be generated for that directory.", "# like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT = 'Contents © {date} <a href=\"mailto:{email}\">{author}</a> {license}'", "output written to output # contains only the name of the leaf category", "Change this to 'left' if you want left-aligned equations. # \"HTML-CSS\": { #", "# The category will not be displayed on the category list page. #", "escape # a forward slash or a backslash (i.e. '\\//\\\\' is a path", "10 # INDEX_DISPLAY_POST_COUNT = 10 # Extra things you want in the pages", "# The format is a dictionary of {source: relative destination}. # Default is:", "False, # Show featured posts on mobile. 
'featured_on_mobile': True, # Show image in", "=> http://mysite/foo/bar/ # (Uses the INDEX_FILE setting, so if that is, say, default.html,", "index.html for page folders? # WARNING: if a page would conflict with the", "# featured_large_image_on_mobile featured_strip_html sidebar # bootstrap4 supports: navbar_light (defaults to False) # navbar_custom_bg", "detects the input from the source filename # but is disabled by default", "this point, everything is optional # Post's dates are considered in UTC by", "# Which means copy 'files' into 'output' # One or more folders containing", "regular tags. USE_TAG_METADATA = False # If set to True, a warning is", "the path will be used as a prefix for the generated pages location", "on mobile. # `featured_small` displays them only on desktop. 'featured_large_image_on_mobile': True, # Strip", "a forward slash ('/') to separate paths. Use a backslash ('\\') to escape", "# Extra things you want in the pages HEAD tag. This will be", "of the 'draft', 'mathjax' # and 'private' tags are found in a post.", "= False # Templates will use those filters, along with the defaults. #", "extension); used for indexes and galleries. # (translatable) # RSS_FILENAME_BASE = \"rss\" #", "used for indexes and galleries. # (translatable) # RSS_FILENAME_BASE = \"rss\" # Atom", "# For example, the `v7` directory of your clone of the Nikola themes", "to False, these tags are handled like regular tags. USE_TAG_METADATA = False #", "in # hierarchies. For a post, the whole path in the hierarchy must", "name of the leaf category and not the whole path. CATEGORY_OUTPUT_FLAT_HIERARCHY = False", "the # subcategory called '\\' of the top-level category called '/'). CATEGORY_ALLOW_HIERARCHIES =", "(translatable) INDEX_READ_MORE_LINK = '<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' # 'Read more...' 
for the feeds, if", "HTML and just copies it COMPILERS = { \"rest\": ['.rst', '.txt'], \"markdown\": ['.md',", "'cache' # CACHE_FOLDER = 'cache' # ############################################################################# # Image Gallery Options # #############################################################################", "You can leave this option blank to disable comments. COMMENT_SYSTEM = \"\" #", "# You can define multiple presets and specify them as arguments # to", "two featured posts in small boxes. 'featured_small': False, # Show featured posts on", "# Google Analytics or whatever else you use. Added to the bottom of", "# text!), just use this config: # MATHJAX_CONFIG = \"\"\" # <script type=\"text/x-mathjax-config\">", "themes might also use it # as an accent color (the default ones", "referenced manually to be visible on the site # (the thumbnail has ``.thumbnail``", "local user time (JS, using Luxon) # 2 = using a string like", "# Consult your engine's documentation on filters if you need help defining #", "careful :-) # DISABLED_PLUGINS = [\"render_galleries\"] # Special settings to disable only parts", "of the theme to use. #THEME = \"bootblog4\" THEME = \"disimplex\" # A", "a per-page/post basis by adding # .. pretty_url: False # to the metadata.", "considered in UTC by default, if you want to use # another time", "the main URL for your site. It will be used # in a", "as relative # to the location of conf.py # OUTPUT_FOLDER = 'output' #", "(translatable) # Samples for bootblog4 (enabled) and bootstrap4 (commented) follow. # bootblog4 supports:", "fragments and diverse things that are used by the templates # ############################################################################# #", "= \"This is a demo site for Nikola.\" # (translatable) # What is", "to run # them without reconfiguring your server to recognize them. \"php\": ['.php'],", "filename base (without extension); used for indexes and galleries. 
# (translatable) # RSS_FILENAME_BASE", "with IMAGE_THUMBNAIL_FORMAT). IMAGE_FOLDERS = {'images': 'images'} # IMAGE_THUMBNAIL_SIZE = 400 # IMAGE_THUMBNAIL_FORMAT =", "} # Preferred metadata format for new posts # \"YAML\": YAML wrapped in", "(\"/rss.xml\", \"RSS feed\"), ), } # Alternative navigation links. Works the same way", "in a prominent link. Don't forget the protocol (http/https)! SITE_URL = \"https://example.com/\" #", "the Creative Commons' wizard: # https://creativecommons.org/choose/ # LICENSE = \"\"\" # <a rel=\"license\"", "execute to deploy. Can be anything, for # example, you may use rsync:", "# What will translated input files be named like? TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' #", "# URL_TYPE = 'rel_path' # # Note that our use of \"server side", "--delete output/ joe@my.site:/srv/www/site\" # And then do a backup, or run `nikola ping`", "PAGES contains (wildcard, destination, template) tuples. # (translatable) # POSTS = ( (\"posts/*.rst\",", "Show the latest featured post in a large box, with the previewimage as", "that represent that language. # Feel free to add or delete extensions to", "pages will still be generated. HIDDEN_TAGS = ['mathjax'] # If CATEGORY_ALLOW_HIERARCHIES is set", "will not be deployed # DEPLOY_DRAFTS = True # Allows scheduling of posts", "it COMPILERS = { \"rest\": ['.rst', '.txt'], \"markdown\": ['.md', '.mdown', '.markdown'], \"textile\": ['.textile'],", "LUXON_DATE_FORMAT = { # DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'}, # } #", "enables STRIP_INDEXES. # This can be disabled on a per-page/post basis by adding", "use. Added to the bottom of <body> # in the default template (base.tmpl).", "our use of \"server side includes\" / partials # REQUIRES the use of", "# (translatable) # RSS_FILENAME_BASE = \"rss\" # Atom filename base (without extension); used", "want to use # another time zone, please set TIMEZONE to match. 
Check", "scheduling of posts using the rule specified here (new_post -s) # Specify an", "(eg. Chrome on Android). Other themes might also use it # as an", "using LUXON_DATE_FORMAT and local user time (JS, using Luxon) # 2 = using", "LOCALES = {} # One or more folders containing files to be copied", "you use. The default is # \"nikolademo\" which is a test account for", "your site. It will be used # in a prominent link. Don't forget", "the default for a # long time). Insert anything you want here, or", "What Markdown extensions to enable? # You will also get gist, nikola and", "caused by setting slug to `index`), the PAGE_INDEX # will not be generated", "<a href=\"mailto:{email}\">{author}</a> - Powered by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}' # Things that will", "podcast because those are # done in the code, hope you don't mind", "<script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!-- End of social buttons --> # \"\"\" # Show", "have special # meaning. If set to False, these tags are handled like", "(base.tmpl). # (translatable) # BODY_END = \"\" # Bundle JS and CSS into", "True, # Strip HTML from featured post text. 'featured_strip_html': False, # Contents of", "USE_BUNDLES = True USE_BUNDLES = False # Plugins you don't want to use.", "MathJax.Hub.Config({ # tex2jax: { # inlineMath: [ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ], # displayMath: [", "is the first entry in the POSTS tuple. # # 'rest' is reStructuredText", "license, for the sidebar. # (translatable) LICENSE = \"\" # I recommend using", "to escape # a forward slash or a backslash (i.e. 
'\\//\\\\' is a", "# (translatable) # BODY_END = \"\" # Bundle JS and CSS into single", "class=\"addthis_button_facebook\"></a> # <li><a class=\"addthis_button_google_plusone_share\"></a> # <li><a class=\"addthis_button_linkedin\"></a> # <li><a class=\"addthis_button_twitter\"></a> # </ul> #", "faster in a HTTP/1.1 # environment but is not recommended for HTTP/2.0 when", "choose between different third party comment # systems. The following comment systems are", "{left: \"$\", right: \"$\", display: false}, # {left: \"\\\\\\\\(\", right: \"\\\\\\\\)\", display: false}", "(translatable) # (Bootstrap 4: right-side of navbar, Bootblog 4: right side of title)", "to be referenced manually to be visible on the site # (the thumbnail", "`ping` # plugin (`nikola plugin -i ping`). Or run `nikola check -l`. #", "for the posts? SHOW_SOURCELINK = False # Copy the source files for your", "sidebar. # (translatable) LICENSE = \"\" # I recommend using the Creative Commons'", "( (\"posts/*.rst\", \"posts\", \"post.tmpl\"), (\"posts/*.md\", \"posts\", \"post.tmpl\"), (\"posts/*.txt\", \"posts\", \"post.tmpl\"), (\"posts/*.html\", \"posts\", \"post.tmpl\"),", "for bootblog4: THEME_CONFIG = { DEFAULT_LANG: { # Show the latest featured post", "# Social buttons. This is sample code for AddThis (which was the default", "# repository. # EXTRA_THEMES_DIRS = [] # List of regular expressions, links matching", "it possible to run # them without reconfiguring your server to recognize them.", "on the site # (the thumbnail has ``.thumbnail`` added before the file extension", "always be considered # valid by \"nikola check -l\" # LINK_CHECK_WHITELIST = []", "= 0 # Customize the locale/region used for a language. # For example,", "'private' have special # meaning. If set to False, these tags are handled", "= \"index.html\" # If a link ends in /index.html, drop the index.html part.", "If not set, defaults to SITE_URL # BASE_URL = \"https://example.com/\" BLOG_EMAIL = \"<EMAIL>\"", "called '/'). 
CATEGORY_ALLOW_HIERARCHIES = False # If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the", "'yyyy-MM-dd HH:mm'}, # } # Date fanciness. # # 0 = using DATE_FORMAT", "source files for your pages? # Setting it to False implies SHOW_SOURCELINK =", "them. # For example, the `v7` directory of your clone of the Nikola", "The format is {\"translationcode\" : \"path/to/translation\" } # the path will be used", "full_path: a URL with the full path from the root # absolute: a", "all posts (not pages!) by default # SCHEDULE_ALL = False # Do you", "generated. ENABLE_AUTHOR_PAGES = False # If you do not want to display an", "HTML file will be created in output/foo/from.html that redirects # to the \"/bar/to.html\"", "# ############################################################################# # 'Read more...' for the index page, if INDEX_TEASERS is True", "matching them will always be considered # valid by \"nikola check -l\" #", "please set TIMEZONE to match. Check the available # list from Wikipedia: TIMEZONE", "was the default for a # long time). Insert anything you want here,", "generated pages location TRANSLATIONS = { DEFAULT_LANG: \"\", # Example for another language:", "feeds that works just # like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT = 'Contents ©", "tag list page and posts. # Tag pages will still be generated. HIDDEN_TAGS", "# valid by \"nikola check -l\" # LINK_CHECK_WHITELIST = [] # The <hN>", "navbar_custom_bg (defaults to '') # Config for bootblog4: THEME_CONFIG = { DEFAULT_LANG: {", "the available # list from Wikipedia: TIMEZONE = \"Europe/London\" # Date format used", "display: false}, # {left: \"\\\\\\\\(\", right: \"\\\\\\\\)\", display: false} # ] # \"\"\"", "will use those filters, along with the defaults. # Consult your engine's documentation", "the usual way (i.e. with the full templates). # The resulting files have", "more...' 
for the index page, if INDEX_TEASERS is True (translatable) INDEX_READ_MORE_LINK = '<p", "This is the URL where Nikola's output will be deployed. # If not", "posts/pages can take 3 forms: # rel_path: a relative URL to the current", "for those galleries that don't have one # None: show a grey square", "\"posts\", \"post.tmpl\"), (\"posts/*.html\", \"posts\", \"post.tmpl\"), ) PAGES = ( (\"pages/*.rst\", \"\", \"page.tmpl\"), (\"pages/*.md\",", "is a dictionary of {source: relative destination}. # Default is: # FILES_FOLDERS =", "for Atom feed files # ATOM_EXTENSION = \".atom\" # A list of redirection", "your liking. !! # # Data about this site BLOG_AUTHOR = \"<NAME>\" #", "# REQUIRES the use of 'full_path' # URL_TYPE = 'full_path' # Extension for", "= False # Image to use as thumbnail for those galleries that don't", "\"nikolademo\" which is a test account for Disqus. More information # is in", "// Change this to 'left' if you want left-aligned equations. # \"HTML-CSS\": {", "# contains only the name of the leaf category and not the whole", "If you do not want to display an author publicly, you can mark", "ago” (JS, using Luxon) # # Your theme must support it, Bootstrap already", "addthis_default_style addthis_label_style addthis_32x32_style\"> # <a class=\"addthis_button_more\">Share</a> # <ul><li><a class=\"addthis_button_facebook\"></a> # <li><a class=\"addthis_button_google_plusone_share\"></a> #", "-l`. # You may also want to use github_deploy (see below). # You", "} # Alternative navigation links. Works the same way NAVIGATION_LINKS does, # although", "# LISTINGS_FOLDERS = {'listings': 'listings'} # Which means process listings from 'listings' into", "(translatable) # BODY_END = \"\" # Bundle JS and CSS into single files", "'featured_small': False, # Show featured posts on mobile. 
'featured_on_mobile': True, # Show image", "href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img alt=\"Creative Commons License BY-NC-SA\" # style=\"border-width:0; margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" #", "and 'private' tags are found in a post. Useful for checking that #", "{} # Add functions here and they will be called with template #", "# Date format used to display post dates, if local dates are used.", "post text. 'featured_strip_html': False, # Contents of the sidebar, If empty, the sidebar", "CONTENT_FOOTER_FORMATS = { DEFAULT_LANG: ( (), { \"email\": BLOG_EMAIL, \"author\": BLOG_AUTHOR, \"date\": time.gmtime().tm_year,", "\"\" # Bundle JS and CSS into single files to make site loading", "used for indexes. # (translatable) ATOM_FILENAME_BASE = \"feed\" # Extension for Atom feed", "# !! You should edit it to your liking. !! # # Data", "by \"nikola check -l\" # LINK_CHECK_WHITELIST = [] # The <hN> tags in", "but is disabled by default as it would conflict # with many of", "# absolute: a complete URL (that includes the SITE_URL) # URL_TYPE = 'rel_path'", "format used to display post dates, if local dates are used. (translatable) #", "posts)? # COMMENTS_IN_PAGES = False # Enable comments on picture gallery pages? #", "color (eg. Chrome on Android). Other themes might also use it # as", "[] # Settings for the (boot)Reveal theme must be added to the global", "= None # Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE", "plugin. # Use with care. # DISABLE_INDEXES = False # DISABLE_MAIN_ATOM_FEED = False", "(translatable) FEED_READ_MORE_LINK = '<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' # Append a URL query to the", "# Setting it to False implies SHOW_SOURCELINK = False COPY_SOURCES = False #", "navbar_light (defaults to False) # navbar_custom_bg (defaults to '') # Config for bootblog4:", "recognize them. 
\"php\": ['.php'], # Pandoc detects the input from the source filename", "it as hidden. # The tag will not be displayed on the tag", "Special settings to disable only parts of the indexes plugin. # Use with", "if you need help defining # those. # TEMPLATE_FILTERS = {} # Put", "from 'listings' into 'output/listings' # A mapping of languages to file-extensions that represent", "from indexing and other robotic spidering. * is supported. Will only be effective", "# If you don't use an absolute path, it will be considered as", "leave this option blank to disable comments. COMMENT_SYSTEM = \"\" # And you", "configure the deployment branch on GitHub. GITHUB_SOURCE_BRANCH = 'src' GITHUB_DEPLOY_BRANCH = 'master' #", "False # What file should be used for directory indexes? # Defaults to", "# And then do a backup, or run `nikola ping` from the `ping`", "comments, you can choose between different third party comment # systems. The following", "backup, or run `nikola ping` from the `ping` # plugin (`nikola plugin -i", "\"<NAME>\" # (translatable) BLOG_TITLE = \"My Nikola Site\" # (translatable) # This is", "\".xml\" # RSS filename base (without extension); used for indexes and galleries. #", "# ATOM_EXTENSION = \".atom\" # A list of redirection tuples, [(\"foo/from.html\", \"/bar/to.html\")]. #", "# Samples for bootblog4 (enabled) and bootstrap4 (commented) follow. # bootblog4 supports: featured_large", "all) # MARKDOWN_EXTENSION_CONFIGS = {} # Social buttons. This is sample code for", "Customize the locale/region used for a language. # For example, to use British", "add any new # compilers unless you write the interface for it yourself.", "True, the tags 'draft', 'mathjax' and 'private' have special # meaning. If set", "# 0 = using DATE_FORMAT and TIMEZONE (without JS) # 1 = using", "want support for the $.$ syntax (which may # conflict with running text!),", "by the templates # ############################################################################# # 'Read more...' 
for the index page, if", "featured_large featured_small featured_on_mobile # featured_large_image_on_mobile featured_strip_html sidebar # bootstrap4 supports: navbar_light (defaults to", "of redirection tuples, [(\"foo/from.html\", \"/bar/to.html\")]. # # A HTML file will be created", "the site # (the thumbnail has ``.thumbnail`` added before the file extension by", "# DISABLE_INDEXES = False # DISABLE_MAIN_ATOM_FEED = False # DISABLE_MAIN_RSS_FEED = False #", "by some browsers as # the browser UI color (eg. Chrome on Android).", "displayed. 'sidebar': '' } } # POSTS and PAGES contains (wildcard, destination, template)", "to inform search engines about /sitemapindex.xml. # ROBOTS_EXCLUSIONS = [\"/archive.html\", \"/category/*.html\"] # Instead", "its background. 'featured_large': False, # Show the first (remaining) two featured posts in", "(1 → h1 will become h2 and so on) # This was a", "is not recommended for HTTP/2.0 when caching is used. # Defaults to True.", "<div id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style\"> # <a class=\"addthis_button_more\">Share</a> # <ul><li><a class=\"addthis_button_facebook\"></a>", "with running text!), just use this config: # KATEX_AUTO_RENDER = \"\"\" # delimiters:", "# to the \"/bar/to.html\" URL. notice that the \"from\" side MUST be a", "{\"margin\": 0}} # } # }); # </script> # \"\"\" # Want to", "MARKDOWN_EXTENSION_CONFIGS = {} # Social buttons. This is sample code for AddThis (which", "comments on picture gallery pages? # COMMENTS_IN_GALLERIES = False # What file should", "make site loading faster in a HTTP/1.1 # environment but is not recommended", "like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT = 'Contents © {date} <a href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN", "CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in # hierarchies. 
For", "is True (translatable) INDEX_READ_MORE_LINK = '<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' # 'Read more...' for the", "# The format is {\"translationcode\" : \"path/to/translation\" } # the path will be", "a post, the whole path in the hierarchy must be specified, # using", "can mark it as hidden. # The tag will not be displayed on", "400 # IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}' # ############################################################################# # HTML fragments and diverse things", "(Uses the INDEX_FILE setting, so if that is, say, default.html, # it will", "templates. # It can be anything, data, functions, modules, etc. GLOBAL_CONTEXT = {}", "\"\\\\\\\\[\", right: \"\\\\\\\\]\", display: true}, # {left: \"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\", display: true}, #", "done in the code, hope you don't mind ;-) # Note: most Nikola-specific", "# \"rsync -rav --delete output/ joe@my.site:/srv/www/site\", # ] # } # github_deploy configuration", "greeting. (translatable) FRONT_INDEX_HEADER = { DEFAULT_LANG: '' } # URLs to other posts/pages", "equations. # \"HTML-CSS\": { # styles: {'.MathJax_Display': {\"margin\": 0}} # } # });", "(in HTML). # (translatable) CONTENT_FOOTER = 'Contents &copy; {date} <a href=\"mailto:{email}\">{author}</a> - Powered", "Do you want to add a Mathjax config file? # MATHJAX_CONFIG = \"\"", "but will have to be referenced manually to be visible on the site", "3 forms: # rel_path: a relative URL to the current page/post (default) #", "implies SHOW_SOURCELINK = False COPY_SOURCES = False # Modify the number of Post", "URL with the full path from the root # absolute: a complete URL", "forms: # rel_path: a relative URL to the current page/post (default) # full_path:", "in the POSTS tuple. 
# # 'rest' is reStructuredText # 'markdown' is Markdown", "SHOW_SOURCELINK = False COPY_SOURCES = False # Modify the number of Post per", "this site BLOG_AUTHOR = \"<NAME>\" # (translatable) BLOG_TITLE = \"My Nikola Site\" #", "instead of US English: LOCALES = {'en': 'en_GB'} # LOCALES = {} #", "<a class=\"addthis_button_more\">Share</a> # <ul><li><a class=\"addthis_button_facebook\"></a> # <li><a class=\"addthis_button_google_plusone_share\"></a> # <li><a class=\"addthis_button_linkedin\"></a> # <li><a", "per Index Page # Defaults to 10 # INDEX_DISPLAY_POST_COUNT = 10 # Extra", "# Bundle JS and CSS into single files to make site loading faster", "{date} <a href=\"mailto:{email}\">{author}</a> - Powered by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}' # Things that", "to SITE_URL # BASE_URL = \"https://example.com/\" BLOG_EMAIL = \"<EMAIL>\" BLOG_DESCRIPTION = \"This is", "located # If you don't use an absolute path, it will be considered", "will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE # options, but will", "Data about this site BLOG_AUTHOR = \"<NAME>\" # (translatable) BLOG_TITLE = \"My Nikola", "must be specified, # using a forward slash ('/') to separate paths. Use", "not be displayed on the author list page and posts. # Tag pages", "-i ping`). Or run `nikola check -l`. # You may also want to", "may not always support them. (translatable) # (Bootstrap 4: right-side of navbar, Bootblog", "want to use github_deploy (see below). # You can define multiple presets and", "# Defaults to 10 # INDEX_DISPLAY_POST_COUNT = 10 # Extra things you want", "added right # before </head> # (translatable) # EXTRA_HEAD_DATA = \"\" # Google", "configured with IMAGE_THUMBNAIL_FORMAT). 
IMAGE_FOLDERS = {'images': 'images'} # IMAGE_THUMBNAIL_SIZE = 400 # IMAGE_THUMBNAIL_FORMAT", "'<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' # Append a URL query to the FEED_READ_MORE_LINK in Atom", "output/ joe@my.site:/srv/www/site\" # And then do a backup, or run `nikola ping` from", "option used for traffic source tracking. FEED_LINKS_APPEND_QUERY = False # A HTML fragment", "be effective # if SITE_URL points to server root. The list is used", "set to False, these tags are handled like regular tags. USE_TAG_METADATA = False", "file should be used for directory indexes? # Defaults to index.html # Common", "square # '/url/to/file': show the image in that url GALLERIES_DEFAULT_THUMBNAIL = None #", "# rendered GLOBAL_CONTEXT_FILLER = [] # Settings for the (boot)Reveal theme must be", "top-level category called '/'). CATEGORY_ALLOW_HIERARCHIES = False # If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to", "picture gallery pages? # COMMENTS_IN_GALLERIES = False # What file should be used", "'medium', or 'short' # DATE_FORMAT = 'yyyy-MM-dd HH:mm' # Date format used to", "Options to be passed to markdown extensions (See https://python-markdown.github.io/reference/) # Default is {}", "listings to be processed and published on # the site. The format is", "file? # MATHJAX_CONFIG = \"\" # If you want support for the $.$", "the `ping` # plugin (`nikola plugin -i ping`). Or run `nikola check -l`.", "here and they will be called with template # GLOBAL_CONTEXT as parameter when", "feeds, if FEED_TEASERS is True (translatable) FEED_READ_MORE_LINK = '<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' # Append", "in global_context things you want available on all your templates. # It can", "# the site. The format is a dictionary of {source: relative destination}. #", "are done via the Nikola plugin system, # with the MarkdownExtension class and", "Show link to source for the posts? 
SHOW_SOURCELINK = False # Copy the", "default, if you want to use # another time zone, please set TIMEZONE", "1 = using LUXON_DATE_FORMAT and local user time (JS, using Luxon) # 2", "that works just # like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT = 'Contents © {date}", "# those. # TEMPLATE_FILTERS = {} # Put in global_context things you want", "{date} {author} {license}' RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS # To use comments, you can choose", "others. # \"pandoc\": ['.rst', '.md', '.txt'], } # Preferred metadata format for new", "default # SCHEDULE_ALL = False # Do you want to add a Mathjax", "for traffic source tracking. FEED_LINKS_APPEND_QUERY = False # A HTML fragment describing the", "text!), just use this config: # MATHJAX_CONFIG = \"\"\" # <script type=\"text/x-mathjax-config\"> #", "the output site should be located # If you don't use an absolute", "displayMath: [ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ], # processEscapes: true # }, # displayAlign: 'center',", "to the location of conf.py # OUTPUT_FOLDER = 'output' # where the \"cache\"", "featured_strip_html sidebar # bootstrap4 supports: navbar_light (defaults to False) # navbar_custom_bg (defaults to", "If you do not want to display a tag publicly, you can mark", "= \"\" # Create index.html for page folders? # WARNING: if a page", "DEPLOY_COMMANDS = { # 'default': [ # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\", #", "using Luxon) # # Your theme must support it, Bootstrap already does. #", "slash or a backslash (i.e. '\\//\\\\' is a path specifying the # subcategory", "HTML fragment describing the license, for the sidebar. # (translatable) LICENSE = \"\"", "of the indexes plugin. # Use with care. 
# DISABLE_INDEXES = False #", "down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE # options, but will have to be", "with the full path from the root # absolute: a complete URL (that", "False # If set to True, a warning is issued if one of", "'/url/to/file': show the image in that url GALLERIES_DEFAULT_THUMBNAIL = None # Images will", "Be careful :-) # DISABLED_PLUGINS = [\"render_galleries\"] # Special settings to disable only", "extensions to any list, but don't add any new # compilers unless you", "{ DEFAULT_LANG: \"\", # Example for another language: # \"es\": \"./es\", } #", "github_deploy should commit to the source branch automatically # before deploying. GITHUB_COMMIT_SOURCE =", "buttons --> # \"\"\" # Show link to source for the posts? SHOW_SOURCELINK", "A mapping of languages to file-extensions that represent that language. # Feel free", "yet, it's faster and the output looks better. # USE_KATEX = False #", "will be deployed. # If not set, defaults to SITE_URL # BASE_URL =", "copied as-is into the output. # The format is a dictionary of {source:", "those filters, along with the defaults. # Consult your engine's documentation on filters", "HEX value. THEME_COLOR = '#5670d4' # Theme configuration. Fully theme-dependent. (translatable) # Samples", "template is about to be # rendered GLOBAL_CONTEXT_FILLER = [] # Settings for", "# RSS filename base (without extension); used for indexes and galleries. # (translatable)", "to display a category publicly, you can mark it as hidden. # The", "set TIMEZONE to match. Check the available # list from Wikipedia: TIMEZONE =", "display a tag publicly, you can mark it as hidden. # The tag", "must be added to the global context. 
# subtheme selection: beige/serif/simple/sky/night/default # transition", "\"server side includes\" / partials # REQUIRES the use of 'full_path' # URL_TYPE", "# Show the latest featured post in a large box, with the previewimage", "format for new posts # \"YAML\": YAML wrapped in \"---\" METADATA_FORMAT = \"YAML\"", "author list page and posts. # Tag pages will still be generated. HIDDEN_AUTHORS", "notice that the \"from\" side MUST be a # relative URL. # #", "a demo site for Nikola.\" # (translatable) # What is the default language?", "pretty_url: False # to the metadata. PRETTY_URLS = False # If True, publish", "github_deploy configuration # For more details, read the manual: # https://getnikola.com/handbook.html#deploying-to-github # You", "URL (that includes the SITE_URL) # URL_TYPE = 'rel_path' # # Note that", "be used as a prefix for the generated pages location TRANSLATIONS = {", "This is a dict. The keys are languages, and values are tuples. NAVIGATION_LINKS", "to True and there is more than one # author, author pages are", "# default: 'cache' # CACHE_FOLDER = 'cache' # ############################################################################# # Image Gallery Options", "so if that is, say, default.html, # it will instead /foo/default.html => /foo)", "the latest featured post in a large box, with the previewimage as its", "commands to execute to deploy. Can be anything, for # example, you may", "= \"disimplex\" # A theme color. In default themes, it might be displayed", "True (translatable) INDEX_READ_MORE_LINK = '<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' # 'Read more...' for the feeds,", "to display post dates, if local dates are used. (translatable) # Used by", "want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value. # DEPLOY_FUTURE = False", "not the whole path. 
CATEGORY_OUTPUT_FLAT_HIERARCHY = False # If you do not want", "# DISABLE_MAIN_ATOM_FEED = False # DISABLE_MAIN_RSS_FEED = False # Add the absolute paths", "# KATEX_AUTO_RENDER = \"\"\" # delimiters: [ # {left: \"$$\", right: \"$$\", display:", "Android). Other themes might also use it # as an accent color (the", "= False # If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to", "= 'output' # where the \"cache\" of partial generated content should be located", "Common other alternatives: default.html for IIS, index.php # INDEX_FILE = \"index.html\" # If", "Must be a HEX value. THEME_COLOR = '#5670d4' # Theme configuration. Fully theme-dependent.", "hidden. # The author will not be displayed on the author list page", "for inclusion in RSS feeds that works just # like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS", "DEFAULT_LANG: \"\", # Example for another language: # \"es\": \"./es\", } # What", "includes\" / partials # REQUIRES the use of 'full_path' # URL_TYPE = 'full_path'", "Works the same way NAVIGATION_LINKS does, # although themes may not always support", "to display post dates. (translatable) # Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time #", "as thumbnail for those galleries that don't have one # None: show a", "the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced # option used for traffic", "= {} # Put in global_context things you want available on all your", "# If not set, defaults to SITE_URL # BASE_URL = \"https://example.com/\" BLOG_EMAIL =", "it to False implies SHOW_SOURCELINK = False COPY_SOURCES = False # Modify the", "to directories containing plugins to use them. # For example, the `plugins` directory", "Nikola plugin system, # with the MarkdownExtension class and should not be added", "GITHUB_SOURCE_BRANCH = 'src' GITHUB_DEPLOY_BRANCH = 'master' # The name of the remote where", "Enable comments on picture gallery pages? 
# COMMENTS_IN_GALLERIES = False # What file", "be added to the global context. # subtheme selection: beige/serif/simple/sky/night/default # transition selection:", "If True, use the scheduling rule to all posts (not pages!) by default", "the global context. # subtheme selection: beige/serif/simple/sky/night/default # transition selection: cube/page/concave/linear/none/default GLOBAL_CONTEXT.update({ 'subtheme':", "feature of the Markdown and reST compilers in the # past. Useful especially", "False, 'format': 'yyyy-MM-dd HH:mm'}, # } # Date fanciness. # # 0 =", "whole path. CATEGORY_OUTPUT_FLAT_HIERARCHY = False # If you do not want to display", "HTTP/2.0 when caching is used. # Defaults to True. # USE_BUNDLES = True", "commit to the source branch automatically # before deploying. GITHUB_COMMIT_SOURCE = True #", "\"\" # Google Analytics or whatever else you use. Added to the bottom", "page, if INDEX_TEASERS is True (translatable) INDEX_READ_MORE_LINK = '<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' # 'Read", "# If True, publish future dated posts right away instead of scheduling them.", "single files to make site loading faster in a HTTP/1.1 # environment but", "file is HTML and just copies it COMPILERS = { \"rest\": ['.rst', '.txt'],", "# {left: \"\\\\\\\\[\", right: \"\\\\\\\\]\", display: true}, # {left: \"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\", display:", "want to display an author publicly, you can mark it as hidden. #", "= \"feed\" # Extension for Atom feed files # ATOM_EXTENSION = \".atom\" #", "None: show a grey square # '/url/to/file': show the image in that url", "generated content should be located # default: 'cache' # CACHE_FOLDER = 'cache' #", "and there is more than one # author, author pages are generated. ENABLE_AUTHOR_PAGES", "URL. notice that the \"from\" side MUST be a # relative URL. #", "\"\" # Create index.html for page folders? # WARNING: if a page would", "main URL for your site. 
It will be used # in a prominent", "does. # DATE_FANCINESS = 0 # Customize the locale/region used for a language.", "\"html\": ['.html', '.htm'], # PHP files are rendered the usual way (i.e. with", "need to configure the deployment branch on GitHub. GITHUB_SOURCE_BRANCH = 'src' GITHUB_DEPLOY_BRANCH =", "as parameter when the template is about to be # rendered GLOBAL_CONTEXT_FILLER =", "TIMEZONE to match. Check the available # list from Wikipedia: TIMEZONE = \"Europe/London\"", "many of the others. # \"pandoc\": ['.rst', '.md', '.txt'], } # Preferred metadata", "a relative URL to the current page/post (default) # full_path: a URL with", "'full_path' # Extension for RSS feed files # RSS_EXTENSION = \".xml\" # RSS", "simple copyright tag for inclusion in RSS feeds that works just # like", "support every # feature yet, it's faster and the output looks better. #", "--> # <div id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style\"> # <a class=\"addthis_button_more\">Share</a> #", "= { DEFAULT_LANG: { # Show the latest featured post in a large", "as many presets # in a `nikola deploy` command as you like. #", "\"date\": time.gmtime().tm_year, \"license\": LICENSE } ) } # A simple copyright tag for", "be displayed on the tag list page and posts. # Tag pages will", "list page and posts. # Tag pages will still be generated. HIDDEN_TAGS =", "should be located # If you don't use an absolute path, it will", "}, # displayAlign: 'center', // Change this to 'left' if you want left-aligned", "STRIP_INDEXES = False # List of files relative to the server root (!)", "featured posts on mobile. 'featured_on_mobile': True, # Show image in `featured_large` on mobile.", "specify them as arguments # to `nikola deploy`. If no arguments are specified,", "IMAGE_THUMBNAIL_FORMAT). 
IMAGE_FOLDERS = {'images': 'images'} # IMAGE_THUMBNAIL_SIZE = 400 # IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}'", "server to recognize them. \"php\": ['.php'], # Pandoc detects the input from the", "RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS # To use comments, you can choose between different third", "INDEX_FILE = \"index.html\" # If a link ends in /index.html, drop the index.html", "to be passed to markdown extensions (See https://python-markdown.github.io/reference/) # Default is {} (no", "and DEPLOY_FUTURE to be the same value. # DEPLOY_FUTURE = False # If", "Mathjax config file? # MATHJAX_CONFIG = \"\" # If you want support for", "DEFAULT_LANG = \"en\" # What other languages do you have? # The format", "- Powered by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}' # Things that will be passed", "deployed output # Only the individual posts are published/deployed; not in indexes/sitemap #", "True, use the scheduling rule to all posts (not pages!) by default #", "where the \"cache\" of partial generated content should be located # default: 'cache'", "/index.html, drop the index.html part. # http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses the INDEX_FILE", "False # A HTML fragment describing the license, for the sidebar. # (translatable)", "/ navigation bar. (translatable) # This is a dict. The keys are languages,", "etc. GLOBAL_CONTEXT = {} # Add functions here and they will be called", "used as a prefix for the generated pages location TRANSLATIONS = { DEFAULT_LANG:", "# full_path: a URL with the full path from the root # absolute:", "= False # If set to True, a warning is issued if one", "redirection tuples, [(\"foo/from.html\", \"/bar/to.html\")]. # # A HTML file will be created in", "at all) # MARKDOWN_EXTENSION_CONFIGS = {} # Social buttons. This is sample code", "first entry in the POSTS tuple. 
# # 'rest' is reStructuredText # 'markdown'", "or delete extensions to any list, but don't add any new # compilers", "the \"from\" side MUST be a # relative URL. # # If you", "the URL where Nikola's output will be deployed. # If not set, defaults", "page footer (in HTML). # (translatable) CONTENT_FOOTER = 'Contents &copy; {date} <a href=\"mailto:{email}\">{author}</a>", "class=\"addthis_button_more\">Share</a> # <ul><li><a class=\"addthis_button_facebook\"></a> # <li><a class=\"addthis_button_google_plusone_share\"></a> # <li><a class=\"addthis_button_linkedin\"></a> # <li><a class=\"addthis_button_twitter\"></a>", "output looks better. # USE_KATEX = False # KaTeX auto-render settings. If you", "of partial generated content should be located # default: 'cache' # CACHE_FOLDER =", "for the index page, if INDEX_TEASERS is True (translatable) INDEX_READ_MORE_LINK = '<p class=\"more\"><a", "settings to disable only parts of the indexes plugin. # Use with care.", "(which is # the default right now) # (translatable) # SOCIAL_BUTTONS_CODE = \"\"\"", "of the remote where you wish to push to, using github_deploy. GITHUB_REMOTE_NAME =", "(\"posts/*.html\", \"posts\", \"post.tmpl\"), ) PAGES = ( (\"pages/*.rst\", \"\", \"page.tmpl\"), (\"pages/*.md\", \"\", \"page.tmpl\"),", "TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' # Links for the sidebar / navigation bar. (translatable) #", "navigation links. Works the same way NAVIGATION_LINKS does, # although themes may not", "a thumbnail (defined by \".. previewimage:\" in the gallery's index) in # list", "can mark it as hidden. # The category will not be displayed on", "to execute to deploy. 
Can be anything, for # example, you may use", "Luxon: https://moment.github.io/luxon/docs/manual/formatting # Example for presets: {'preset': True, 'format': 'DATE_FULL'} # LUXON_DATE_FORMAT =", "{'images': 'images'} # IMAGE_THUMBNAIL_SIZE = 400 # IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}' # ############################################################################# #", "# displayAlign: 'center', // Change this to 'left' if you want left-aligned equations.", "don't want to use. Be careful :-) # DISABLED_PLUGINS = [\"render_galleries\"] # Special", "Luxon) # # Your theme must support it, Bootstrap already does. # DATE_FANCINESS", "tag publicly, you can mark it as hidden. # The tag will not", "index page, if INDEX_TEASERS is True (translatable) INDEX_READ_MORE_LINK = '<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' #", "True, future dated posts are allowed in deployed output # Only the individual", "{left: \"$$\", right: \"$$\", display: true}, # {left: \"\\\\\\\\[\", right: \"\\\\\\\\]\", display: true},", "will be used as a prefix for the generated pages location TRANSLATIONS =", "is # \"nikolademo\" which is a test account for Disqus. More information #", "# in a prominent link. Don't forget the protocol (http/https)! SITE_URL = \"https://example.com/\"", "HIDDEN_TAGS = ['mathjax'] # If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be", "(\"/categories/\", \"Tags\"), (\"/rss.xml\", \"RSS feed\"), ), } # Alternative navigation links. Works the", "a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT). IMAGE_FOLDERS = {'images': 'images'}", "be added right # before </head> # (translatable) # EXTRA_HEAD_DATA = \"\" #", "and should not be added here. 
# Defaults are markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta is", "is done CONTENT_FOOTER_FORMATS = { DEFAULT_LANG: ( (), { \"email\": BLOG_EMAIL, \"author\": BLOG_AUTHOR,", "social buttons --> # \"\"\" # Show link to source for the posts?", "with the previewimage as its background. 'featured_large': False, # Show the first (remaining)", "time zone, please set TIMEZONE to match. Check the available # list from", "be excluded # from indexing and other robotic spidering. * is supported. Will", "############################################################################# # HTML fragments and diverse things that are used by the templates", "'.txt'], } # Preferred metadata format for new posts # \"YAML\": YAML wrapped", "be a # relative URL. # # If you don't need any of", "making it possible to run # them without reconfiguring your server to recognize", "# it will instead /foo/default.html => /foo) STRIP_INDEXES = False # List of", "of your clone of the Nikola plugins # repository. # EXTRA_PLUGINS_DIRS = []", "Use a backslash ('\\') to escape # a forward slash or a backslash", "compilers (reST/Markdown) # will be demoted by that much (1 → h1 will", "on) # This was a hidden feature of the Markdown and reST compilers", "4: right-side of navbar, Bootblog 4: right side of title) NAVIGATION_ALT_LINKS = {", "(translatable) ATOM_FILENAME_BASE = \"feed\" # Extension for Atom feed files # ATOM_EXTENSION =", "the top-level category called '/'). CATEGORY_ALLOW_HIERARCHIES = False # If CATEGORY_OUTPUT_FLAT_HIERARCHY is set", "{'preset': False, 'format': 'yyyy-MM-dd HH:mm'}, # } # Date fanciness. # # 0", "\"$\", right: \"$\", display: false}, # {left: \"\\\\\\\\(\", right: \"\\\\\\\\)\", display: false} #", "of scheduling them. # Defaults to False. 
# FUTURE_IS_NOW = False # If", "(See https://python-markdown.github.io/reference/) # Default is {} (no config at all) # MARKDOWN_EXTENSION_CONFIGS =", "COMMENTS_IN_GALLERIES = False # What file should be used for directory indexes? #", "# For example, to use British instead of US English: LOCALES = {'en':", "Google Analytics or whatever else you use. Added to the bottom of <body>", "every # feature yet, it's faster and the output looks better. # USE_KATEX", "page. # Category pages will still be generated. HIDDEN_CATEGORIES = [] # If", "be executed. You can use as many presets # in a `nikola deploy`", "# Feel free to add or delete extensions to any list, but don't", "prefix for the generated pages location TRANSLATIONS = { DEFAULT_LANG: \"\", # Example", "from the source filename # but is disabled by default as it would", "the image in that url GALLERIES_DEFAULT_THUMBNAIL = None # Images will be scaled", "facebook, intensedebate, isso, muut, commento, utterances # You can leave this option blank", "these tags are handled like regular tags. USE_TAG_METADATA = False # If set", "= 'src' GITHUB_DEPLOY_BRANCH = 'master' # The name of the remote where you", "before </head> # (translatable) # EXTRA_HEAD_DATA = \"\" # Google Analytics or whatever", "to the bottom of <body> # in the default template (base.tmpl). # (translatable)", "be deployed # DEPLOY_DRAFTS = True # Allows scheduling of posts using the", "POSTS and PAGES contains (wildcard, destination, template) tuples. # (translatable) # POSTS =", "to [] REDIRECTIONS = [] # Presets of commands to execute to deploy.", "category called '/'). CATEGORY_ALLOW_HIERARCHIES = False # If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True,", "by adding # .. pretty_url: False # to the metadata. PRETTY_URLS = False", "support it, Bootstrap already does. # DATE_FANCINESS = 0 # Customize the locale/region", "\"HTML-CSS\": { # styles: {'.MathJax_Display': {\"margin\": 0}} # } # }); # </script>", "about /sitemapindex.xml. 
# ROBOTS_EXCLUSIONS = [\"/archive.html\", \"/category/*.html\"] # Instead of putting files in", "notice for the page footer (in HTML). # (translatable) CONTENT_FOOTER = 'Contents &copy;", "right side of title) NAVIGATION_ALT_LINKS = { DEFAULT_LANG: () } # Name of", "not in indexes/sitemap # Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the", "as you like. # DEPLOY_COMMANDS = { # 'default': [ # \"rsync -rav", "CONTENT_FOOTER = 'Contents &copy; {date} <a href=\"mailto:{email}\">{author}</a> - Powered by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a>", "only the name of the leaf category and not the whole path. CATEGORY_OUTPUT_FLAT_HIERARCHY", "RSS feed files # RSS_EXTENSION = \".xml\" # RSS filename base (without extension);", "[\"/archive.html\", \"/category/*.html\"] # Instead of putting files in <slug>.html, put them in <slug>/index.html.", "get gist, nikola and podcast because those are # done in the code,", "/sitemap.xml, and to inform search engines about /sitemapindex.xml. # ROBOTS_EXCLUSIONS = [\"/archive.html\", \"/category/*.html\"]", "a dictionary of {source: relative destination}. # Default is: # LISTINGS_FOLDERS = {'listings':", "You can also use 'full', 'long', 'medium', or 'short' # DATE_FORMAT = 'yyyy-MM-dd", "not want to display an author publicly, you can mark it as hidden.", "and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT = 'Contents © {date} <a href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN = 'Contents", "# (the thumbnail has ``.thumbnail`` added before the file extension by default, #", "has ``.thumbnail`` added before the file extension by default, # but a different", "may # conflict with running text!), just use this config: # KATEX_AUTO_RENDER =", "specified here (new_post -s) # Specify an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE", "faster and the output looks better. 
# USE_KATEX = False # KaTeX auto-render", "If False, draft posts will not be deployed # DEPLOY_DRAFTS = True #", "long time). Insert anything you want here, or even make it empty (which", "Post's dates are considered in UTC by default, if you want to use", "True, the output written to output # contains only the name of the", "Add the absolute paths to directories containing plugins to use them. # For", "If you do not want to display a category publicly, you can mark", "use an absolute path, it will be considered as relative # to the", "a Mathjax config file? # MATHJAX_CONFIG = \"\" # If you want support", "SOCIAL_BUTTONS_CODE = \"\"\" # <!-- Social buttons --> # <div id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style", "joe@my.site:/srv/www/site\", # ] # } # github_deploy configuration # For more details, read", "--delete output/ joe@my.site:/srv/www/site\", # ] # } # github_deploy configuration # For more", "config at all) # MARKDOWN_EXTENSION_CONFIGS = {} # Social buttons. This is sample", "to display a tag publicly, you can mark it as hidden. # The", "the $.$ syntax (which may # conflict with running text!), just use this", "# <!-- End of social buttons --> # \"\"\" # Show link to", "pages are generated. ENABLE_AUTHOR_PAGES = False # If you do not want to", "to use # another time zone, please set TIMEZONE to match. Check the", "just # like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT = 'Contents © {date} <a href=\"mailto:{email}\">{author}</a>", "do not want to display a tag publicly, you can mark it as", "The following comment systems are supported by Nikola: # disqus, facebook, intensedebate, isso,", "have .php extensions, making it possible to run # them without reconfiguring your", "and posts. # Tag pages will still be generated. HIDDEN_AUTHORS = ['Guest'] #", "Note: most Nikola-specific extensions are done via the Nikola plugin system, # with", "demo site for Nikola.\" # (translatable) # What is the default language? 
DEFAULT_LANG", "want to display a category publicly, you can mark it as hidden. #", "specified, a preset # named `default` will be executed. You can use as", "and diverse things that are used by the templates # ############################################################################# # 'Read", "from featured post text. 'featured_strip_html': False, # Contents of the sidebar, If empty,", "in the gallery's index) in # list of galleries for each gallery GALLERIES_USE_THUMBNAIL", "a test account for Disqus. More information # is in the manual. COMMENT_SYSTEM_ID", "For more details, read the manual: # https://getnikola.com/handbook.html#deploying-to-github # You will need to", "DEFAULT_LANG: () } # Name of the theme to use. #THEME = \"bootblog4\"", "commento, utterances # You can leave this option blank to disable comments. COMMENT_SYSTEM", "use. The default is # \"nikolademo\" which is a test account for Disqus.", "= False # List of files relative to the server root (!) that", "web server configuration is required. Also enables STRIP_INDEXES. # This can be disabled", "</div> # <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!-- End of social buttons --> # \"\"\"", "HTML). # (translatable) CONTENT_FOOTER = 'Contents &copy; {date} <a href=\"mailto:{email}\">{author}</a> - Powered by", "you use. Added to the bottom of <body> # in the default template", "will be passed to CONTENT_FOOTER.format(). This is done CONTENT_FOOTER_FORMATS = { DEFAULT_LANG: (", "add or delete extensions to any list, but don't add any new #", "!! # # Data about this site BLOG_AUTHOR = \"<NAME>\" # (translatable) BLOG_TITLE", "of title) NAVIGATION_ALT_LINKS = { DEFAULT_LANG: () } # Name of the theme", "# Extension for RSS feed files # RSS_EXTENSION = \".xml\" # RSS filename", "templates # ############################################################################# # 'Read more...' 
for the index page, if INDEX_TEASERS is", "indexes and galleries. # (translatable) # RSS_FILENAME_BASE = \"rss\" # Atom filename base", "# }, # displayAlign: 'center', // Change this to 'left' if you want", "{ # DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'}, # } # Date fanciness.", "# 1 = using LUXON_DATE_FORMAT and local user time (JS, using Luxon) #", "( (), { \"email\": BLOG_EMAIL, \"author\": BLOG_AUTHOR, \"date\": time.gmtime().tm_year, \"license\": LICENSE } )", "# Show featured posts on mobile. 'featured_on_mobile': True, # Show image in `featured_large`", "the $.$ syntax (which may conflict with running # text!), just use this", "In default themes, it might be displayed by some browsers as # the", "dated posts are allowed in deployed output # Only the individual posts are", "= \"\" # Google Analytics or whatever else you use. Added to the", "be # rendered GLOBAL_CONTEXT_FILLER = [] # Settings for the (boot)Reveal theme must", "\"\\\\\\\\)\", display: false} # ] # \"\"\" # What Markdown extensions to enable?", "# Note: most Nikola-specific extensions are done via the Nikola plugin system, #", "British instead of US English: LOCALES = {'en': 'en_GB'} # LOCALES = {}", "# SCHEDULE_ALL = False # Do you want to add a Mathjax config", "# (translatable) # EXTRA_HEAD_DATA = \"\" # Google Analytics or whatever else you", "environment but is not recommended for HTTP/2.0 when caching is used. 
# Defaults", "use as thumbnail for those galleries that don't have one # None: show", ") PAGES = ( (\"pages/*.rst\", \"\", \"page.tmpl\"), (\"pages/*.md\", \"\", \"page.tmpl\"), (\"pages/*.txt\", \"\", \"page.tmpl\"),", "HTTP/1.1 # environment but is not recommended for HTTP/2.0 when caching is used.", "functions here and they will be called with template # GLOBAL_CONTEXT as parameter", "rendered GLOBAL_CONTEXT_FILLER = [] # Settings for the (boot)Reveal theme must be added", "= { DEFAULT_LANG: ( (\"/archive.html\", \"Archive\"), (\"/categories/\", \"Tags\"), (\"/rss.xml\", \"RSS feed\"), ), }", "GITHUB_DEPLOY_BRANCH = 'master' # The name of the remote where you wish to", "# <div id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style\"> # <a class=\"addthis_button_more\">Share</a> # <ul><li><a", "it yourself. # # The default compiler for `new_post` is the first entry", "page would conflict with the index file (usually # caused by setting slug", "is the configuration of Nikola. !! # # !! You should edit it", "= [] # If ENABLE_AUTHOR_PAGES is set to True and there is more", "FEED_READ_MORE_LINK = '<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' # Append a URL query to the FEED_READ_MORE_LINK", "follow. # bootblog4 supports: featured_large featured_small featured_on_mobile # featured_large_image_on_mobile featured_strip_html sidebar # bootstrap4", "from the root # absolute: a complete URL (that includes the SITE_URL) #", "If True, publish future dated posts right away instead of scheduling them. #", "} } # POSTS and PAGES contains (wildcard, destination, template) tuples. # (translatable)", "0}} # } # }); # </script> # \"\"\" # Want to use", "for the $.$ syntax (which may conflict with running # text!), just use", ": \"path/to/translation\" } # the path will be used as a prefix for", "The format is a dictionary of {source: relative destination}. 
# Default is: #", "# (translatable) # SOCIAL_BUTTONS_CODE = \"\"\" # <!-- Social buttons --> # <div", "used. # Defaults to True. # USE_BUNDLES = True USE_BUNDLES = False #", "CONTENT_FOOTER and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT = 'Contents © {date} <a href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN =", "class=\"addthis_button_twitter\"></a> # </ul> # </div> # <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!-- End of social", "# Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time # You can also use 'full',", "# (defaults to 1.) # DEMOTE_HEADERS = 1 # If set to True,", "['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc'] # Options to be passed to markdown extensions (See", "posts in small boxes. 'featured_small': False, # Show featured posts on mobile. 'featured_on_mobile':", "MarkdownExtension class and should not be added here. # Defaults are markdown.extensions.(fenced_code|codehilite|extra) #", "server root. The list is used to exclude resources from # /robots.txt and", "# If you do not want to display a tag publicly, you can", "title) NAVIGATION_ALT_LINKS = { DEFAULT_LANG: () } # Name of the theme to", "Atom filename base (without extension); used for indexes. # (translatable) ATOM_FILENAME_BASE = \"feed\"", "side of title) NAVIGATION_ALT_LINKS = { DEFAULT_LANG: () } # Name of the", "What file should be used for directory indexes? # Defaults to index.html #", "be a HEX value. THEME_COLOR = '#5670d4' # Theme configuration. Fully theme-dependent. (translatable)", "files are rendered the usual way (i.e. with the full templates). # The", "= \".atom\" # A list of redirection tuples, [(\"foo/from.html\", \"/bar/to.html\")]. # # A", "that redirects # to the \"/bar/to.html\" URL. 
notice that the \"from\" side MUST", "done via the Nikola plugin system, # with the MarkdownExtension class and should", "# DISABLE_MAIN_RSS_FEED = False # Add the absolute paths to directories containing plugins", "= {} # One or more folders containing files to be copied as-is", "= {} # Add functions here and they will be called with template", "`index`), the PAGE_INDEX # will not be generated for that directory. # PAGE_INDEX", "It will be used # in a prominent link. Don't forget the protocol", "→ h1 will become h2 and so on) # This was a hidden", "} # Date fanciness. # # 0 = using DATE_FORMAT and TIMEZONE (without", "to markdown extensions (See https://python-markdown.github.io/reference/) # Default is {} (no config at all)", "<a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img alt=\"Creative Commons License BY-NC-SA\" # style=\"border-width:0; margin-bottom:12px;\" #", "posts on mobile. 'featured_on_mobile': True, # Show image in `featured_large` on mobile. #", "is a demo site for Nikola.\" # (translatable) # What is the default", "[ # {left: \"$$\", right: \"$$\", display: true}, # {left: \"\\\\\\\\[\", right: \"\\\\\\\\]\",", "as-is into the output. # The format is a dictionary of {source: relative", "spidering. * is supported. Will only be effective # if SITE_URL points to", "Options # ############################################################################# # Use a thumbnail (defined by \".. previewimage:\" in the", "if one of the 'draft', 'mathjax' # and 'private' tags are found in", "# \"YAML\": YAML wrapped in \"---\" METADATA_FORMAT = \"YAML\" # If you do", "should not be added here. # Defaults are markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta is required", "(translatable) # Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time # You can also use", "backslash ('\\') to escape # a forward slash or a backslash (i.e. 
'\\//\\\\'", "# `featured_small` displays them only on desktop. 'featured_large_image_on_mobile': True, # Strip HTML from", "conf.py # OUTPUT_FOLDER = 'output' # where the \"cache\" of partial generated content", "branch on GitHub. GITHUB_SOURCE_BRANCH = 'src' GITHUB_DEPLOY_BRANCH = 'master' # The name of", "comments on pages (i.e. not posts)? # COMMENTS_IN_PAGES = False # Enable comments", "option blank to disable comments. COMMENT_SYSTEM = \"\" # And you also need", "the first (remaining) two featured posts in small boxes. 'featured_small': False, # Show", "successful. WARN_ABOUT_TAG_METADATA = False # Templates will use those filters, along with the", "syntax (which may conflict with running # text!), just use this config: #", "be organized in # hierarchies. For a post, the whole path in the", "\"posts\", \"post.tmpl\"), (\"posts/*.txt\", \"posts\", \"post.tmpl\"), (\"posts/*.html\", \"posts\", \"post.tmpl\"), ) PAGES = ( (\"pages/*.rst\",", "{} # One or more folders containing files to be copied as-is into", "separate paths. Use a backslash ('\\') to escape # a forward slash or", "will also get gist, nikola and podcast because those are # done in", "# Append a URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds.", "for the (boot)Reveal theme must be added to the global context. # subtheme", "like “2 days ago” (JS, using Luxon) # # Your theme must support", "If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output # contains", "more...' for the feeds, if FEED_TEASERS is True (translatable) FEED_READ_MORE_LINK = '<p><a href=\"{link}\">{read_more}…</a>", "#THEME = \"bootblog4\" THEME = \"disimplex\" # A theme color. In default themes,", ":-) # DISABLED_PLUGINS = [\"render_galleries\"] # Special settings to disable only parts of", "'markdown' is Markdown # 'html' assumes the file is HTML and just copies", "whatever else you use. Added to the bottom of <body> # in the", "disable comments. 
COMMENT_SYSTEM = \"\" # And you also need to add your", "of languages to file-extensions that represent that language. # Feel free to add", "right away instead of scheduling them. # Defaults to False. # FUTURE_IS_NOW =", "partials # REQUIRES the use of 'full_path' # URL_TYPE = 'full_path' # Extension", "is True (translatable) FEED_READ_MORE_LINK = '<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' # Append a URL query", "[ # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\", # ] # } # github_deploy", "\"\"\" # <a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img alt=\"Creative Commons License BY-NC-SA\" # style=\"border-width:0;", "# COMMENTS_IN_GALLERIES = False # What file should be used for directory indexes?", "URL to the current page/post (default) # full_path: a URL with the full", "EXTRA_PLUGINS_DIRS = [] # Add the absolute paths to directories containing themes to", "rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img alt=\"Creative Commons License BY-NC-SA\" # style=\"border-width:0; margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\"", "will be demoted by that much (1 → h1 will become h2 and", "presets: {'preset': True, 'format': 'DATE_FULL'} # LUXON_DATE_FORMAT = { # DEFAULT_LANG: {'preset': False,", "compilers in the # past. Useful especially if your post titles are in", "featured_small featured_on_mobile # featured_large_image_on_mobile featured_strip_html sidebar # bootstrap4 supports: navbar_light (defaults to False)", "recommended for HTTP/2.0 when caching is used. # Defaults to True. # USE_BUNDLES", "too, for # example. # (defaults to 1.) # DEMOTE_HEADERS = 1 #", "just set to [] REDIRECTIONS = [] # Presets of commands to execute", "# Show image in `featured_large` on mobile. 
# `featured_small` displays them only on", "output # Only the individual posts are published/deployed; not in indexes/sitemap # Generally,", "= False # Plugins you don't want to use. Be careful :-) #", "to `nikola deploy`. If no arguments are specified, a preset # named `default`", "for another language: # \"es\": \"./es\", } # What will translated input files", "use British instead of US English: LOCALES = {'en': 'en_GB'} # LOCALES =", "per-page/post basis by adding # .. pretty_url: False # to the metadata. PRETTY_URLS", "['.html', '.htm'], # PHP files are rendered the usual way (i.e. with the", "and just copies it COMPILERS = { \"rest\": ['.rst', '.txt'], \"markdown\": ['.md', '.mdown',", "the number of Post per Index Page # Defaults to 10 # INDEX_DISPLAY_POST_COUNT", "in the hierarchy must be specified, # using a forward slash ('/') to", "and podcast because those are # done in the code, hope you don't", "# I recommend using the Creative Commons' wizard: # https://creativecommons.org/choose/ # LICENSE =", "is: # FILES_FOLDERS = {'files': ''} # Which means copy 'files' into 'output'", "\"\"\" # What Markdown extensions to enable? # You will also get gist,", "clone of the Nikola plugins # repository. # EXTRA_PLUGINS_DIRS = [] # Add", "Powered by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}' # Things that will be passed to", "used # in a prominent link. Don't forget the protocol (http/https)! SITE_URL =", "TIMEZONE (without JS) # 1 = using LUXON_DATE_FORMAT and local user time (JS,", "there is more than one # author, author pages are generated. ENABLE_AUTHOR_PAGES =", "be disabled on a per-page/post basis by adding # .. pretty_url: False #", "below). # You can define multiple presets and specify them as arguments #", "rendered the usual way (i.e. with the full templates). # The resulting files", "Advanced # option used for traffic source tracking. 
FEED_LINKS_APPEND_QUERY = False # A", "(translatable) CONTENT_FOOTER = 'Contents &copy; {date} <a href=\"mailto:{email}\">{author}</a> - Powered by <a href=\"https://getnikola.com\"", "inclusion in RSS feeds that works just # like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT", "= {'files': ''} # Which means copy 'files' into 'output' # One or", "other languages do you have? # The format is {\"translationcode\" : \"path/to/translation\" }", "specifying the # subcategory called '\\' of the top-level category called '/'). CATEGORY_ALLOW_HIERARCHIES", "# # 0 = using DATE_FORMAT and TIMEZONE (without JS) # 1 =", "party comment # systems. The following comment systems are supported by Nikola: #", "\"\"\" # delimiters: [ # {left: \"$$\", right: \"$$\", display: true}, # {left:", "iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE = '' # If True, use the", "GLOBAL_CONTEXT_FILLER = [] # Settings for the (boot)Reveal theme must be added to", "If True, future dated posts are allowed in deployed output # Only the", "['.rst', '.md', '.txt'], } # Preferred metadata format for new posts # \"YAML\":", "redirects # to the \"/bar/to.html\" URL. notice that the \"from\" side MUST be", "# To use comments, you can choose between different third party comment #", "AddThis (which was the default for a # long time). Insert anything you", "still be generated. HIDDEN_TAGS = ['mathjax'] # If CATEGORY_ALLOW_HIERARCHIES is set to True,", "NAVIGATION_LINKS does, # although themes may not always support them. (translatable) # (Bootstrap", "# subtheme selection: beige/serif/simple/sky/night/default # transition selection: cube/page/concave/linear/none/default GLOBAL_CONTEXT.update({ 'subtheme': 'simple', 'transition': 'none'", "the scheduling rule to all posts (not pages!) by default # SCHEDULE_ALL =", "# Defaults are markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta is required for Markdown metadata. 
MARKDOWN_EXTENSIONS =", "'.mdown', '.markdown'], \"textile\": ['.textile'], \"txt2tags\": ['.t2t'], \"bbcode\": ['.bb'], \"wiki\": ['.wiki'], \"ipynb\": ['.ipynb'], \"html\":", "# will be demoted by that much (1 → h1 will become h2", "indexing and other robotic spidering. * is supported. Will only be effective #", "list, but don't add any new # compilers unless you write the interface", "be processed and published on # the site. The format is a dictionary", "# Image to use as thumbnail for those galleries that don't have one", "page folders? # WARNING: if a page would conflict with the index file", "POSTS tuple. # # 'rest' is reStructuredText # 'markdown' is Markdown # 'html'", "(usually # caused by setting slug to `index`), the PAGE_INDEX # will not", "be displayed on the author list page and posts. # Tag pages will", "time # !! This is the configuration of Nikola. !! # # !!", "Nikola. !! # # !! You should edit it to your liking. !!", "'output/listings' # A mapping of languages to file-extensions that represent that language. #", "posts (not pages!) by default # SCHEDULE_ALL = False # Do you want", "found in a post. Useful for checking that # migration was successful. WARN_ABOUT_TAG_METADATA", "to '') # Config for bootblog4: THEME_CONFIG = { DEFAULT_LANG: { # Show", "Theme configuration. Fully theme-dependent. (translatable) # Samples for bootblog4 (enabled) and bootstrap4 (commented)", "link. Don't forget the protocol (http/https)! SITE_URL = \"https://example.com/\" # This is the", "# Default is: # LISTINGS_FOLDERS = {'listings': 'listings'} # Which means process listings", "search engines about /sitemapindex.xml. 
# ROBOTS_EXCLUSIONS = [\"/archive.html\", \"/category/*.html\"] # Instead of putting", "False # DISABLE_MAIN_ATOM_FEED = False # DISABLE_MAIN_RSS_FEED = False # Add the absolute", "© {date} <a href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}' RSS_COPYRIGHT_FORMATS", "[\"\\\\\\[\",\"\\\\\\]\"] ], # processEscapes: true # }, # displayAlign: 'center', // Change this", "= [] # Add the absolute paths to directories containing themes to use", "False implies SHOW_SOURCELINK = False COPY_SOURCES = False # Modify the number of", "{left: \"\\\\\\\\(\", right: \"\\\\\\\\)\", display: false} # ] # \"\"\" # What Markdown", "COMPILERS = { \"rest\": ['.rst', '.txt'], \"markdown\": ['.md', '.mdown', '.markdown'], \"textile\": ['.textile'], \"txt2tags\":", "site for Nikola.\" # (translatable) # What is the default language? DEFAULT_LANG =", "if that is, say, default.html, # it will instead /foo/default.html => /foo) STRIP_INDEXES", "and to inform search engines about /sitemapindex.xml. # ROBOTS_EXCLUSIONS = [\"/archive.html\", \"/category/*.html\"] #", "= True USE_BUNDLES = False # Plugins you don't want to use. Be", "this config: # MATHJAX_CONFIG = \"\"\" # <script type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({ # tex2jax:", "and bootstrap4 (commented) follow. # bootblog4 supports: featured_large featured_small featured_on_mobile # featured_large_image_on_mobile featured_strip_html", "# bootstrap4 supports: navbar_light (defaults to False) # navbar_custom_bg (defaults to '') #", "FRONT_INDEX_HEADER = { DEFAULT_LANG: '' } # URLs to other posts/pages can take", "assumes the file is HTML and just copies it COMPILERS = { \"rest\":", "# Show the first (remaining) two featured posts in small boxes. 'featured_small': False,", "the full path from the root # absolute: a complete URL (that includes", "will be asked to be excluded # from indexing and other robotic spidering.", "effective # if SITE_URL points to server root. 
The list is used to", "False # to the metadata. PRETTY_URLS = False # If True, publish future", "Strip HTML from featured post text. 'featured_strip_html': False, # Contents of the sidebar,", "= \"\"\" # <script type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({ # tex2jax: { # inlineMath: [", "for `new_post` is the first entry in the POSTS tuple. # # 'rest'", "are rendered the usual way (i.e. with the full templates). # The resulting", "you also need to add your COMMENT_SYSTEM_ID which # depends on what comment", "a language. # For example, to use British instead of US English: LOCALES", "use it # as an accent color (the default ones don’t). Must be", "to use KaTeX instead of MathJax? While KaTeX may not support every #", "local dates are used. (translatable) # Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting # Example for", "category publicly, you can mark it as hidden. # The category will not", "# If False, draft posts will not be deployed # DEPLOY_DRAFTS = True", "meaning. If set to False, these tags are handled like regular tags. USE_TAG_METADATA", "for the sidebar / navigation bar. (translatable) # This is a dict. The", "# For more details, read the manual: # https://getnikola.com/handbook.html#deploying-to-github # You will need", "mapping of languages to file-extensions that represent that language. # Feel free to", "tracking. FEED_LINKS_APPEND_QUERY = False # A HTML fragment describing the license, for the", "hope you don't mind ;-) # Note: most Nikola-specific extensions are done via", "code listings to be processed and published on # the site. The format", "the previewimage as its background. 'featured_large': False, # Show the first (remaining) two", "Or run `nikola check -l`. 
# You may also want to use github_deploy", "to make site loading faster in a HTTP/1.1 # environment but is not", "This was a hidden feature of the Markdown and reST compilers in the", "need to add your COMMENT_SYSTEM_ID which # depends on what comment system you", "means copy 'files' into 'output' # One or more folders containing code listings", "be configured with IMAGE_THUMBNAIL_FORMAT). IMAGE_FOLDERS = {'images': 'images'} # IMAGE_THUMBNAIL_SIZE = 400 #", "\"\\\\\\\\(\", right: \"\\\\\\\\)\", display: false} # ] # \"\"\" # What Markdown extensions", "# Example for presets: {'preset': True, 'format': 'DATE_FULL'} # LUXON_DATE_FORMAT = { #", "# DATE_FORMAT = 'yyyy-MM-dd HH:mm' # Date format used to display post dates,", "= True # Where the output site should be located # If you", "More information # is in the manual. COMMENT_SYSTEM_ID = \"\" # Create index.html", "conflict with running # text!), just use this config: # MATHJAX_CONFIG = \"\"\"", "</script> # \"\"\" # Want to use KaTeX instead of MathJax? While KaTeX", "# </ul> # </div> # <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!-- End of social buttons", "\"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\", display: true}, # {left: \"$\", right: \"$\", display: false}, #", "is HTML and just copies it COMPILERS = { \"rest\": ['.rst', '.txt'], \"markdown\":", "# with many of the others. # \"pandoc\": ['.rst', '.md', '.txt'], } #", "is reStructuredText # 'markdown' is Markdown # 'html' assumes the file is HTML", "to False implies SHOW_SOURCELINK = False COPY_SOURCES = False # Modify the number", "it would conflict # with many of the others. # \"pandoc\": ['.rst', '.md',", "# will not be generated for that directory. # PAGE_INDEX = False #", "LISTINGS_FOLDERS = {'listings': 'listings'} # Which means process listings from 'listings' into 'output/listings'", "(i.e. with the full templates). 
# The resulting files have .php extensions, making", "feed files # ATOM_EXTENSION = \".atom\" # A list of redirection tuples, [(\"foo/from.html\",", "a backslash ('\\') to escape # a forward slash or a backslash (i.e.", "added before the file extension by default, # but a different naming template", "# \"pandoc\": ['.rst', '.md', '.txt'], } # Preferred metadata format for new posts", "plugin system, # with the MarkdownExtension class and should not be added here.", "if FEED_TEASERS is True (translatable) FEED_READ_MORE_LINK = '<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' # Append a", "one # None: show a grey square # '/url/to/file': show the image in", "# A list of redirection tuples, [(\"foo/from.html\", \"/bar/to.html\")]. # # A HTML file", "'output' # where the \"cache\" of partial generated content should be located #", "= False # If False, draft posts will not be deployed # DEPLOY_DRAFTS", "delimiters: [ # {left: \"$$\", right: \"$$\", display: true}, # {left: \"\\\\\\\\[\", right:", "If no arguments are specified, a preset # named `default` will be executed.", "destination}. # Default is: # FILES_FOLDERS = {'files': ''} # Which means copy", "One or more folders containing code listings to be processed and published on", "and 'private' have special # meaning. If set to False, these tags are", "github_deploy (see below). # You can define multiple presets and specify them as", "true}, # {left: \"$\", right: \"$\", display: false}, # {left: \"\\\\\\\\(\", right: \"\\\\\\\\)\",", "{'preset': True, 'format': 'DATE_FULL'} # LUXON_DATE_FORMAT = { # DEFAULT_LANG: {'preset': False, 'format':", "relative destination}. # Default is: # LISTINGS_FOLDERS = {'listings': 'listings'} # Which means", "(i.e. not posts)? # COMMENTS_IN_PAGES = False # Enable comments on picture gallery", "post in a large box, with the previewimage as its background. 'featured_large': False,", "/sitemapindex.xml. 
# ROBOTS_EXCLUSIONS = [\"/archive.html\", \"/category/*.html\"] # Instead of putting files in <slug>.html,", "extension); used for indexes. # (translatable) ATOM_FILENAME_BASE = \"feed\" # Extension for Atom", "be generated. HIDDEN_TAGS = ['mathjax'] # If CATEGORY_ALLOW_HIERARCHIES is set to True, categories", "each gallery GALLERIES_USE_THUMBNAIL = False # Image to use as thumbnail for those", "set to True and there is more than one # author, author pages", "Create index.html for page folders? # WARNING: if a page would conflict with", "A small copyright notice for the page footer (in HTML). # (translatable) CONTENT_FOOTER", "# a forward slash or a backslash (i.e. '\\//\\\\' is a path specifying", "the protocol (http/https)! SITE_URL = \"https://example.com/\" # This is the URL where Nikola's", "CONTENT_FOOTER.format(). This is done CONTENT_FOOTER_FORMATS = { DEFAULT_LANG: ( (), { \"email\": BLOG_EMAIL,", "(JS, using Luxon) # 2 = using a string like “2 days ago”", "code for AddThis (which was the default for a # long time). Insert", "# TEMPLATE_FILTERS = {} # Put in global_context things you want available on", "using github_deploy. GITHUB_REMOTE_NAME = 'origin' # Whether or not github_deploy should commit to", "desktop. 'featured_large_image_on_mobile': True, # Strip HTML from featured post text. 'featured_strip_html': False, #", "pages? # COMMENTS_IN_GALLERIES = False # What file should be used for directory", "allowed in deployed output # Only the individual posts are published/deployed; not in", "contains only the name of the leaf category and not the whole path.", "(translatable) # RSS_FILENAME_BASE = \"rss\" # Atom filename base (without extension); used for", "\"./es\", } # What will translated input files be named like? TRANSLATIONS_PATTERN =", "Default is: # FILES_FOLDERS = {'files': ''} # Which means copy 'files' into", "(translatable) # This is the main URL for your site. 
It will be", "= \"<EMAIL>\" BLOG_DESCRIPTION = \"This is a demo site for Nikola.\" # (translatable)", "# (translatable) # POSTS = ( (\"posts/*.rst\", \"posts\", \"post.tmpl\"), (\"posts/*.md\", \"posts\", \"post.tmpl\"), (\"posts/*.txt\",", "be copied as-is into the output. # The format is a dictionary of", "HTML fragments and diverse things that are used by the templates # #############################################################################", "a # relative URL. # # If you don't need any of these,", "deployment branch on GitHub. GITHUB_SOURCE_BRANCH = 'src' GITHUB_DEPLOY_BRANCH = 'master' # The name", "# to the location of conf.py # OUTPUT_FOLDER = 'output' # where the", "in a post. Useful for checking that # migration was successful. WARN_ABOUT_TAG_METADATA =", "days ago” (JS, using Luxon) # # Your theme must support it, Bootstrap", "# List of regular expressions, links matching them will always be considered #", "false} # ] # \"\"\" # What Markdown extensions to enable? # You", "a category publicly, you can mark it as hidden. # The category will", "as its background. 'featured_large': False, # Show the first (remaining) two featured posts", "for it yourself. # # The default compiler for `new_post` is the first", "# # Note that our use of \"server side includes\" / partials #", "Fully theme-dependent. (translatable) # Samples for bootblog4 (enabled) and bootstrap4 (commented) follow. #", "# You can also use 'full', 'long', 'medium', or 'short' # DATE_FORMAT =", "the file extension by default, # but a different naming template can be", "the (boot)Reveal theme must be added to the global context. # subtheme selection:", "blank to disable comments. 
COMMENT_SYSTEM = \"\" # And you also need to", "the index page, if INDEX_TEASERS is True (translatable) INDEX_READ_MORE_LINK = '<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>'", "# https://getnikola.com/handbook.html#deploying-to-github # You will need to configure the deployment branch on GitHub.", "} ) } # A simple copyright tag for inclusion in RSS feeds", "don't have one # None: show a grey square # '/url/to/file': show the", "is not displayed. 'sidebar': '' } } # POSTS and PAGES contains (wildcard,", "COMMENT_SYSTEM_ID which # depends on what comment system you use. The default is", "{author} {license}' RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS # To use comments, you can choose between", "different third party comment # systems. The following comment systems are supported by", "INDEX_DISPLAY_POST_COUNT = 10 # Extra things you want in the pages HEAD tag.", "output site should be located # If you don't use an absolute path,", "configuration # For more details, read the manual: # https://getnikola.com/handbook.html#deploying-to-github # You will", "[] # List of regular expressions, links matching them will always be considered", "on # the site. The format is a dictionary of {source: relative destination}.", "absolute: a complete URL (that includes the SITE_URL) # URL_TYPE = 'rel_path' #", "INDEX_READ_MORE_LINK = '<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' # 'Read more...' for the feeds, if FEED_TEASERS", "JS and CSS into single files to make site loading faster in a", "DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'}, # } # Date fanciness. # #", "have? # The format is {\"translationcode\" : \"path/to/translation\" } # the path will", "destination, template) tuples. # (translatable) # POSTS = ( (\"posts/*.rst\", \"posts\", \"post.tmpl\"), (\"posts/*.md\",", "# \"nikolademo\" which is a test account for Disqus. More information # is", "will still be generated. 
HIDDEN_TAGS = ['mathjax'] # If CATEGORY_ALLOW_HIERARCHIES is set to", "this option blank to disable comments. COMMENT_SYSTEM = \"\" # And you also", "for checking that # migration was successful. WARN_ABOUT_TAG_METADATA = False # Templates will", "adding # .. pretty_url: False # to the metadata. PRETTY_URLS = False #", "'rest' is reStructuredText # 'markdown' is Markdown # 'html' assumes the file is", "published on # the site. The format is a dictionary of {source: relative", "posts are allowed in deployed output # Only the individual posts are published/deployed;", "of the Nikola themes # repository. # EXTRA_THEMES_DIRS = [] # List of", "them without reconfiguring your server to recognize them. \"php\": ['.php'], # Pandoc detects", "inform search engines about /sitemapindex.xml. # ROBOTS_EXCLUSIONS = [\"/archive.html\", \"/category/*.html\"] # Instead of", "sidebar is not displayed. 'sidebar': '' } } # POSTS and PAGES contains", "# USE_KATEX = False # KaTeX auto-render settings. If you want support for", "test account for Disqus. More information # is in the manual. COMMENT_SYSTEM_ID =", "# Want to use KaTeX instead of MathJax? While KaTeX may not support", "'{name}.thumbnail{ext}' # ############################################################################# # HTML fragments and diverse things that are used by", "1 # If set to True, the tags 'draft', 'mathjax' and 'private' have", "used to exclude resources from # /robots.txt and /sitemap.xml, and to inform search", "the hierarchy must be specified, # using a forward slash ('/') to separate", "if INDEX_TEASERS is True (translatable) INDEX_READ_MORE_LINK = '<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' # 'Read more...'", "traffic source tracking. FEED_LINKS_APPEND_QUERY = False # A HTML fragment describing the license,", "need help defining # those. # TEMPLATE_FILTERS = {} # Put in global_context", "a `nikola deploy` command as you like. 
# DEPLOY_COMMANDS = { # 'default':", "to push to, using github_deploy. GITHUB_REMOTE_NAME = 'origin' # Whether or not github_deploy", "of putting files in <slug>.html, put them in <slug>/index.html. # No web server", "you need help defining # those. # TEMPLATE_FILTERS = {} # Put in", "# MATHJAX_CONFIG = \"\" # If you want support for the $.$ syntax", "input from the source filename # but is disabled by default as it", "a hidden feature of the Markdown and reST compilers in the # past.", "your post titles are in <h1> tags too, for # example. # (defaults", "POSTS = ( (\"posts/*.rst\", \"posts\", \"post.tmpl\"), (\"posts/*.md\", \"posts\", \"post.tmpl\"), (\"posts/*.txt\", \"posts\", \"post.tmpl\"), (\"posts/*.html\",", "Atom feed files # ATOM_EXTENSION = \".atom\" # A list of redirection tuples,", "# LICENSE = \"\"\" # <a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img alt=\"Creative Commons License", "filename # but is disabled by default as it would conflict # with", "A HTML file will be created in output/foo/from.html that redirects # to the", "BLOG_AUTHOR = \"<NAME>\" # (translatable) BLOG_TITLE = \"My Nikola Site\" # (translatable) #", "False COPY_SOURCES = False # Modify the number of Post per Index Page", "then do a backup, or run `nikola ping` from the `ping` # plugin", "use them. # For example, the `v7` directory of your clone of the", "configuration. Fully theme-dependent. (translatable) # Samples for bootblog4 (enabled) and bootstrap4 (commented) follow.", "LUXON_DATE_FORMAT and local user time (JS, using Luxon) # 2 = using a", "generated for that directory. # PAGE_INDEX = False # Enable comments on pages", "and /sitemap.xml, and to inform search engines about /sitemapindex.xml. # ROBOTS_EXCLUSIONS = [\"/archive.html\",", "are used by the templates # ############################################################################# # 'Read more...' 
for the index", "\"Archive\"), (\"/categories/\", \"Tags\"), (\"/rss.xml\", \"RSS feed\"), ), } # Alternative navigation links. Works", "# delimiters: [ # {left: \"$$\", right: \"$$\", display: true}, # {left: \"\\\\\\\\[\",", "the `plugins` directory of your clone of the Nikola plugins # repository. #", "MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc'] # Options to be passed to markdown", "&copy; {date} <a href=\"mailto:{email}\">{author}</a> - Powered by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}' # Things", "or even make it empty (which is # the default right now) #", "other robotic spidering. * is supported. Will only be effective # if SITE_URL", "= { DEFAULT_LANG: \"\", # Example for another language: # \"es\": \"./es\", }", "Example for another language: # \"es\": \"./es\", } # What will translated input", "recommend using the Creative Commons' wizard: # https://creativecommons.org/choose/ # LICENSE = \"\"\" #", "# markdown.extensions.meta is required for Markdown metadata. MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc']", "I recommend using the Creative Commons' wizard: # https://creativecommons.org/choose/ # LICENSE = \"\"\"", "Example for presets: {'preset': True, 'format': 'DATE_FULL'} # LUXON_DATE_FORMAT = { # DEFAULT_LANG:", "used for traffic source tracking. FEED_LINKS_APPEND_QUERY = False # A HTML fragment describing", "# The name of the remote where you wish to push to, using", "account for Disqus. More information # is in the manual. COMMENT_SYSTEM_ID = \"\"", "# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml. # ROBOTS_EXCLUSIONS", "with care. 
# DISABLE_INDEXES = False # DISABLE_MAIN_ATOM_FEED = False # DISABLE_MAIN_RSS_FEED =", "\"https://example.com/\" BLOG_EMAIL = \"<EMAIL>\" BLOG_DESCRIPTION = \"This is a demo site for Nikola.\"", "themes # repository. # EXTRA_THEMES_DIRS = [] # List of regular expressions, links", "JS) # 1 = using LUXON_DATE_FORMAT and local user time (JS, using Luxon)", "RSS_COPYRIGHT = 'Contents © {date} <a href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN = 'Contents © {date}", "to use as thumbnail for those galleries that don't have one # None:", "true}, # {left: \"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\", display: true}, # {left: \"$\", right: \"$\",", "in a HTTP/1.1 # environment but is not recommended for HTTP/2.0 when caching", ".php extensions, making it possible to run # them without reconfiguring your server", "list page and posts. # Tag pages will still be generated. HIDDEN_AUTHORS =", "\"Europe/London\" # Date format used to display post dates. (translatable) # Used by", "NAVIGATION_ALT_LINKS = { DEFAULT_LANG: () } # Name of the theme to use.", "them only on desktop. 'featured_large_image_on_mobile': True, # Strip HTML from featured post text.", "# http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses the INDEX_FILE setting, so if that is,", "it might be displayed by some browsers as # the browser UI color", "represent that language. # Feel free to add or delete extensions to any", "# <a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img alt=\"Creative Commons License BY-NC-SA\" # style=\"border-width:0; margin-bottom:12px;\"", "are used. (translatable) # Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting # Example for presets: {'preset':", "'cache' # ############################################################################# # Image Gallery Options # ############################################################################# # Use a thumbnail", "required. 
Also enables STRIP_INDEXES. # This can be disabled on a per-page/post basis", "are in <h1> tags too, for # example. # (defaults to 1.) #", "deployed # DEPLOY_DRAFTS = True # Allows scheduling of posts using the rule", "config: # MATHJAX_CONFIG = \"\"\" # <script type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({ # tex2jax: {", "it empty (which is # the default right now) # (translatable) # SOCIAL_BUTTONS_CODE", "you want to use # another time zone, please set TIMEZONE to match.", "Default is {} (no config at all) # MARKDOWN_EXTENSION_CONFIGS = {} # Social", "{ \"rest\": ['.rst', '.txt'], \"markdown\": ['.md', '.mdown', '.markdown'], \"textile\": ['.textile'], \"txt2tags\": ['.t2t'], \"bbcode\":", "are handled like regular tags. USE_TAG_METADATA = False # If set to True,", "(default) # full_path: a URL with the full path from the root #", "Add the absolute paths to directories containing themes to use them. # For", "to be processed and published on # the site. The format is a", "This is the configuration of Nikola. !! # # !! You should edit", "'files' into 'output' # One or more folders containing code listings to be", "pages will still be generated. HIDDEN_CATEGORIES = [] # If ENABLE_AUTHOR_PAGES is set", "must support it, Bootstrap already does. # DATE_FANCINESS = 0 # Customize the", "# What is the default language? DEFAULT_LANG = \"en\" # What other languages", "\"\"\" # <!-- Social buttons --> # <div id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style", "the page footer (in HTML). # (translatable) CONTENT_FOOTER = 'Contents &copy; {date} <a", "translated input files be named like? TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' # Links for the", "generated. HIDDEN_AUTHORS = ['Guest'] # Optional HTML that displayed on “main” blog index.html", "expressions, links matching them will always be considered # valid by \"nikola check", "way NAVIGATION_LINKS does, # although themes may not always support them. 
(translatable) #", "author will not be displayed on the author list page and posts. #", "first (remaining) two featured posts in small boxes. 'featured_small': False, # Show featured", "# </script> # \"\"\" # Want to use KaTeX instead of MathJax? While", "DATE_FORMAT = 'yyyy-MM-dd HH:mm' # Date format used to display post dates, if", "in Atom and RSS feeds. Advanced # option used for traffic source tracking.", "a post. Useful for checking that # migration was successful. WARN_ABOUT_TAG_METADATA = False", "accent color (the default ones don’t). Must be a HEX value. THEME_COLOR =", "any of these, just set to [] REDIRECTIONS = [] # Presets of", "# INDEX_DISPLAY_POST_COUNT = 10 # Extra things you want in the pages HEAD", "that the \"from\" side MUST be a # relative URL. # # If", "base (without extension); used for indexes. # (translatable) ATOM_FILENAME_BASE = \"feed\" # Extension", "BLOG_EMAIL = \"<EMAIL>\" BLOG_DESCRIPTION = \"This is a demo site for Nikola.\" #", "output. # The format is a dictionary of {source: relative destination}. # Default", "reStructuredText # 'markdown' is Markdown # 'html' assumes the file is HTML and", "= \".xml\" # RSS filename base (without extension); used for indexes and galleries.", "And then do a backup, or run `nikola ping` from the `ping` #", "# relative URL. # # If you don't need any of these, just", "browser UI color (eg. Chrome on Android). Other themes might also use it", "\"rsync -rav --delete output/ joe@my.site:/srv/www/site\" # And then do a backup, or run", "grey square # '/url/to/file': show the image in that url GALLERIES_DEFAULT_THUMBNAIL = None", "files. # May be used for a greeting. (translatable) FRONT_INDEX_HEADER = { DEFAULT_LANG:", "still be generated. 
HIDDEN_AUTHORS = ['Guest'] # Optional HTML that displayed on “main”", "copies it COMPILERS = { \"rest\": ['.rst', '.txt'], \"markdown\": ['.md', '.mdown', '.markdown'], \"textile\":", "# Extension for Atom feed files # ATOM_EXTENSION = \".atom\" # A list", "# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value. #", "most Nikola-specific extensions are done via the Nikola plugin system, # with the", "to use github_deploy (see below). # You can define multiple presets and specify", "was a hidden feature of the Markdown and reST compilers in the #", "\"$\", display: false}, # {left: \"\\\\\\\\(\", right: \"\\\\\\\\)\", display: false} # ] #", "# A HTML fragment describing the license, for the sidebar. # (translatable) LICENSE", "Useful for checking that # migration was successful. WARN_ABOUT_TAG_METADATA = False # Templates", "= using DATE_FORMAT and TIMEZONE (without JS) # 1 = using LUXON_DATE_FORMAT and", "with template # GLOBAL_CONTEXT as parameter when the template is about to be", "{ DEFAULT_LANG: ( (), { \"email\": BLOG_EMAIL, \"author\": BLOG_AUTHOR, \"date\": time.gmtime().tm_year, \"license\": LICENSE", "type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({ # tex2jax: { # inlineMath: [ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ], #", "featured_on_mobile # featured_large_image_on_mobile featured_strip_html sidebar # bootstrap4 supports: navbar_light (defaults to False) #", "= \"rss\" # Atom filename base (without extension); used for indexes. # (translatable)", "# One or more folders containing files to be copied as-is into the", "the 'draft', 'mathjax' # and 'private' tags are found in a post. Useful", "# EXTRA_HEAD_DATA = \"\" # Google Analytics or whatever else you use. Added", "HH:mm' # Date format used to display post dates, if local dates are", "(http/https)! 
SITE_URL = \"https://example.com/\" # This is the URL where Nikola's output will", "you do not want to display a category publicly, you can mark it", "'\\' of the top-level category called '/'). CATEGORY_ALLOW_HIERARCHIES = False # If CATEGORY_OUTPUT_FLAT_HIERARCHY", "# EXTRA_THEMES_DIRS = [] # List of regular expressions, links matching them will", "# (Bootstrap 4: right-side of navbar, Bootblog 4: right side of title) NAVIGATION_ALT_LINKS", "'en_GB'} # LOCALES = {} # One or more folders containing files to", "<slug>/index.html. # No web server configuration is required. Also enables STRIP_INDEXES. # This", "Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting # Example for presets: {'preset': True, 'format': 'DATE_FULL'} #", "tuples, [(\"foo/from.html\", \"/bar/to.html\")]. # # A HTML file will be created in output/foo/from.html", "# systems. The following comment systems are supported by Nikola: # disqus, facebook,", "# WARNING: if a page would conflict with the index file (usually #", "right: \"$\", display: false}, # {left: \"\\\\\\\\(\", right: \"\\\\\\\\)\", display: false} # ]", "The author will not be displayed on the author list page and posts.", "of galleries for each gallery GALLERIES_USE_THUMBNAIL = False # Image to use as", "= ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc'] # Options to be passed to markdown extensions", "of files relative to the server root (!) that will be asked to", "Whether or not github_deploy should commit to the source branch automatically # before", "number of Post per Index Page # Defaults to 10 # INDEX_DISPLAY_POST_COUNT =", "still be generated. HIDDEN_CATEGORIES = [] # If ENABLE_AUTHOR_PAGES is set to True", "# rel_path: a relative URL to the current page/post (default) # full_path: a", "DATE_FANCINESS = 0 # Customize the locale/region used for a language. 
# For", "<img alt=\"Creative Commons License BY-NC-SA\" # style=\"border-width:0; margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A small", "copyright tag for inclusion in RSS feeds that works just # like CONTENT_FOOTER", "The name of the remote where you wish to push to, using github_deploy.", "will be used # in a prominent link. Don't forget the protocol (http/https)!", "HEAD tag. This will be added right # before </head> # (translatable) #", "# HTML fragments and diverse things that are used by the templates #", "your server to recognize them. \"php\": ['.php'], # Pandoc detects the input from", "bar. (translatable) # This is a dict. The keys are languages, and values", "to display an author publicly, you can mark it as hidden. # The", "{source: relative destination}. # Default is: # FILES_FOLDERS = {'files': ''} # Which", "Gallery Options # ############################################################################# # Use a thumbnail (defined by \".. previewimage:\" in", "{ DEFAULT_LANG: ( (\"/archive.html\", \"Archive\"), (\"/categories/\", \"Tags\"), (\"/rss.xml\", \"RSS feed\"), ), } #", "will always be considered # valid by \"nikola check -l\" # LINK_CHECK_WHITELIST =", "# DATE_FANCINESS = 0 # Customize the locale/region used for a language. #", "deploying. GITHUB_COMMIT_SOURCE = True # Where the output site should be located #", "), } # Alternative navigation links. Works the same way NAVIGATION_LINKS does, #", "\".atom\" # A list of redirection tuples, [(\"foo/from.html\", \"/bar/to.html\")]. # # A HTML", "use KaTeX instead of MathJax? While KaTeX may not support every # feature", "Added to the bottom of <body> # in the default template (base.tmpl). #", "the default template (base.tmpl). # (translatable) # BODY_END = \"\" # Bundle JS", "# You will also get gist, nikola and podcast because those are #", "class and should not be added here. 
# Defaults are markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta", "be considered as relative # to the location of conf.py # OUTPUT_FOLDER =", "{left: \"\\\\\\\\[\", right: \"\\\\\\\\]\", display: true}, # {left: \"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\", display: true},", "addthis_32x32_style\"> # <a class=\"addthis_button_more\">Share</a> # <ul><li><a class=\"addthis_button_facebook\"></a> # <li><a class=\"addthis_button_google_plusone_share\"></a> # <li><a class=\"addthis_button_linkedin\"></a>", "and published on # the site. The format is a dictionary of {source:", "setting, so if that is, say, default.html, # it will instead /foo/default.html =>", "excluded # from indexing and other robotic spidering. * is supported. Will only", "https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE = '' # If True, use the scheduling rule to", "languages, and values are tuples. NAVIGATION_LINKS = { DEFAULT_LANG: ( (\"/archive.html\", \"Archive\"), (\"/categories/\",", "free to add or delete extensions to any list, but don't add any", "['.md', '.mdown', '.markdown'], \"textile\": ['.textile'], \"txt2tags\": ['.t2t'], \"bbcode\": ['.bb'], \"wiki\": ['.wiki'], \"ipynb\": ['.ipynb'],", "used by the templates # ############################################################################# # 'Read more...' for the index page,", "Setting it to False implies SHOW_SOURCELINK = False COPY_SOURCES = False # Modify", "{'listings': 'listings'} # Which means process listings from 'listings' into 'output/listings' # A", "utterances # You can leave this option blank to disable comments. COMMENT_SYSTEM =", "should be located # default: 'cache' # CACHE_FOLDER = 'cache' # ############################################################################# #", "support them. 
(translatable) # (Bootstrap 4: right-side of navbar, Bootblog 4: right side", "(\"posts/*.rst\", \"posts\", \"post.tmpl\"), (\"posts/*.md\", \"posts\", \"post.tmpl\"), (\"posts/*.txt\", \"posts\", \"post.tmpl\"), (\"posts/*.html\", \"posts\", \"post.tmpl\"), )", "} # github_deploy configuration # For more details, read the manual: # https://getnikola.com/handbook.html#deploying-to-github", "are markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta is required for Markdown metadata. MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite',", "take 3 forms: # rel_path: a relative URL to the current page/post (default)", "WARN_ABOUT_TAG_METADATA = False # Templates will use those filters, along with the defaults.", "# <script type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({ # tex2jax: { # inlineMath: [ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"]", "will be executed. You can use as many presets # in a `nikola", "posts # \"YAML\": YAML wrapped in \"---\" METADATA_FORMAT = \"YAML\" # If you", "\"rest\": ['.rst', '.txt'], \"markdown\": ['.md', '.mdown', '.markdown'], \"textile\": ['.textile'], \"txt2tags\": ['.t2t'], \"bbcode\": ['.bb'],", "defaults to SITE_URL # BASE_URL = \"https://example.com/\" BLOG_EMAIL = \"<EMAIL>\" BLOG_DESCRIPTION = \"This", "support for the $.$ syntax (which may conflict with running # text!), just", "For example, to use British instead of US English: LOCALES = {'en': 'en_GB'}", "'featured_large': False, # Show the first (remaining) two featured posts in small boxes.", "mind ;-) # Note: most Nikola-specific extensions are done via the Nikola plugin", "would conflict with the index file (usually # caused by setting slug to", "alt=\"Creative Commons License BY-NC-SA\" # style=\"border-width:0; margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A small copyright", "# The tag will not be displayed on the tag list page and", "Date format used to 
display post dates. (translatable) # Used by babel.dates, CLDR", "repository. # EXTRA_THEMES_DIRS = [] # List of regular expressions, links matching them", "BLOG_EMAIL, \"author\": BLOG_AUTHOR, \"date\": time.gmtime().tm_year, \"license\": LICENSE } ) } # A simple", "# subcategory called '\\' of the top-level category called '/'). CATEGORY_ALLOW_HIERARCHIES = False", "list of redirection tuples, [(\"foo/from.html\", \"/bar/to.html\")]. # # A HTML file will be", "displayed on the category list page. # Category pages will still be generated.", "the interface for it yourself. # # The default compiler for `new_post` is", "the file is HTML and just copies it COMPILERS = { \"rest\": ['.rst',", "feed files # RSS_EXTENSION = \".xml\" # RSS filename base (without extension); used", "False, these tags are handled like regular tags. USE_TAG_METADATA = False # If", "{\"translationcode\" : \"path/to/translation\" } # the path will be used as a prefix", "will not be displayed on the tag list page and posts. 
# Tag", "# If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output #", "=> /foo) STRIP_INDEXES = False # List of files relative to the server", "# the default right now) # (translatable) # SOCIAL_BUTTONS_CODE = \"\"\" # <!--", "to add your COMMENT_SYSTEM_ID which # depends on what comment system you use.", "# <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!-- End of social buttons --> # \"\"\" #", "NAVIGATION_LINKS = { DEFAULT_LANG: ( (\"/archive.html\", \"Archive\"), (\"/categories/\", \"Tags\"), (\"/rss.xml\", \"RSS feed\"), ),", "just copies it COMPILERS = { \"rest\": ['.rst', '.txt'], \"markdown\": ['.md', '.mdown', '.markdown'],", "path in the hierarchy must be specified, # using a forward slash ('/')", "is optional # Post's dates are considered in UTC by default, if you", "= 'Contents © {date} <a href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author}", "the feeds, if FEED_TEASERS is True (translatable) FEED_READ_MORE_LINK = '<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' #", "= 400 # IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}' # ############################################################################# # HTML fragments and diverse", "# DEMOTE_HEADERS = 1 # If set to True, the tags 'draft', 'mathjax'", "the output written to output # contains only the name of the leaf", "Pandoc detects the input from the source filename # but is disabled by", "'' # If True, use the scheduling rule to all posts (not pages!)", "# LINK_CHECK_WHITELIST = [] # The <hN> tags in HTML generated by certain", "# URL_TYPE = 'full_path' # Extension for RSS feed files # RSS_EXTENSION =", "Nikola themes # repository. 
# EXTRA_THEMES_DIRS = [] # List of regular expressions,", "optional # Post's dates are considered in UTC by default, if you want", "from Wikipedia: TIMEZONE = \"Europe/London\" # Date format used to display post dates.", "so on) # This was a hidden feature of the Markdown and reST", "don't need any of these, just set to [] REDIRECTIONS = [] #", "that url GALLERIES_DEFAULT_THUMBNAIL = None # Images will be scaled down according to", "# DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'}, # } # Date fanciness. #", "GALLERIES_USE_THUMBNAIL = False # Image to use as thumbnail for those galleries that", "a # long time). Insert anything you want here, or even make it", "(JS, using Luxon) # # Your theme must support it, Bootstrap already does.", "and specify them as arguments # to `nikola deploy`. If no arguments are", "False. # FUTURE_IS_NOW = False # If True, future dated posts are allowed", "wizard: # https://creativecommons.org/choose/ # LICENSE = \"\"\" # <a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img", "US English: LOCALES = {'en': 'en_GB'} # LOCALES = {} # One or", "# although themes may not always support them. (translatable) # (Bootstrap 4: right-side", "# <li><a class=\"addthis_button_twitter\"></a> # </ul> # </div> # <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!-- End", "set to True, a warning is issued if one of the 'draft', 'mathjax'", "the configuration of Nikola. !! # # !! You should edit it to", "Will only be effective # if SITE_URL points to server root. 
The list", "# # If you don't need any of these, just set to []", "= False # DISABLE_MAIN_ATOM_FEED = False # DISABLE_MAIN_RSS_FEED = False # Add the", "(the thumbnail has ``.thumbnail`` added before the file extension by default, # but", "right: \"\\\\\\\\end{equation*}\", display: true}, # {left: \"$\", right: \"$\", display: false}, # {left:", "margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A small copyright notice for the page footer (in", "# but is disabled by default as it would conflict # with many", "# Links for the sidebar / navigation bar. (translatable) # This is a", "file extension by default, # but a different naming template can be configured", "(without extension); used for indexes. # (translatable) ATOM_FILENAME_BASE = \"feed\" # Extension for", "the POSTS tuple. # # 'rest' is reStructuredText # 'markdown' is Markdown #", "Analytics or whatever else you use. Added to the bottom of <body> #", "you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value. # DEPLOY_FUTURE =", "better. # USE_KATEX = False # KaTeX auto-render settings. If you want support", "the tag list page and posts. # Tag pages will still be generated.", "of the Nikola plugins # repository. # EXTRA_PLUGINS_DIRS = [] # Add the", "a prefix for the generated pages location TRANSLATIONS = { DEFAULT_LANG: \"\", #", "# Defaults to index.html # Common other alternatives: default.html for IIS, index.php #", "2 = using a string like “2 days ago” (JS, using Luxon) #", "be considered # valid by \"nikola check -l\" # LINK_CHECK_WHITELIST = [] #", "'master' # The name of the remote where you wish to push to,", "you want left-aligned equations. # \"HTML-CSS\": { # styles: {'.MathJax_Display': {\"margin\": 0}} #", "= False # A HTML fragment describing the license, for the sidebar. #", "automatically # before deploying. GITHUB_COMMIT_SOURCE = True # Where the output site should", "you don't want to use. 
Be careful :-) # DISABLED_PLUGINS = [\"render_galleries\"] #", "If set to True, a warning is issued if one of the 'draft',", "mobile. # `featured_small` displays them only on desktop. 'featured_large_image_on_mobile': True, # Strip HTML", "\"post.tmpl\"), (\"posts/*.txt\", \"posts\", \"post.tmpl\"), (\"posts/*.html\", \"posts\", \"post.tmpl\"), ) PAGES = ( (\"pages/*.rst\", \"\",", "point, everything is optional # Post's dates are considered in UTC by default,", "{license}' # Things that will be passed to CONTENT_FOOTER.format(). This is done CONTENT_FOOTER_FORMATS", "publish future dated posts right away instead of scheduling them. # Defaults to", "ping`). Or run `nikola check -l`. # You may also want to use", "preset # named `default` will be executed. You can use as many presets", "display: true}, # {left: \"$\", right: \"$\", display: false}, # {left: \"\\\\\\\\(\", right:", "example, to use British instead of US English: LOCALES = {'en': 'en_GB'} #", "DEFAULT_LANG: { # Show the latest featured post in a large box, with", "to, using github_deploy. GITHUB_REMOTE_NAME = 'origin' # Whether or not github_deploy should commit", "system, # with the MarkdownExtension class and should not be added here. #", "= CONTENT_FOOTER_FORMATS # To use comments, you can choose between different third party", "# \"\"\" # Want to use KaTeX instead of MathJax? While KaTeX may", "You can use as many presets # in a `nikola deploy` command as", "{date} <a href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}' RSS_COPYRIGHT_FORMATS =", "# IMAGE_THUMBNAIL_SIZE = 400 # IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}' # ############################################################################# # HTML fragments", "# Options to be passed to markdown extensions (See https://python-markdown.github.io/reference/) # Default is", "will not be generated for that directory. 
# PAGE_INDEX = False # Enable", "Markdown # 'html' assumes the file is HTML and just copies it COMPILERS", "to add or delete extensions to any list, but don't add any new", "# with the MarkdownExtension class and should not be added here. # Defaults", "for bootblog4 (enabled) and bootstrap4 (commented) follow. # bootblog4 supports: featured_large featured_small featured_on_mobile", "list from Wikipedia: TIMEZONE = \"Europe/London\" # Date format used to display post", "MathJax? While KaTeX may not support every # feature yet, it's faster and", "# This is the main URL for your site. It will be used", "boxes. 'featured_small': False, # Show featured posts on mobile. 'featured_on_mobile': True, # Show", "# (translatable) # This is the main URL for your site. It will", "post, the whole path in the hierarchy must be specified, # using a", "you can mark it as hidden. # The category will not be displayed", "wish to push to, using github_deploy. GITHUB_REMOTE_NAME = 'origin' # Whether or not", "manual: # https://getnikola.com/handbook.html#deploying-to-github # You will need to configure the deployment branch on", "to 'left' if you want left-aligned equations. # \"HTML-CSS\": { # styles: {'.MathJax_Display':", "for IIS, index.php # INDEX_FILE = \"index.html\" # If a link ends in", "# -*- coding: utf-8 -*- import time # !! This is the configuration", "metadata. MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc'] # Options to be passed to", "= \"YAML\" # If you do not want to display a tag publicly,", "BLOG_AUTHOR, \"date\": time.gmtime().tm_year, \"license\": LICENSE } ) } # A simple copyright tag", "can use as many presets # in a `nikola deploy` command as you", "BY-NC-SA\" # style=\"border-width:0; margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A small copyright notice for the", "theme-dependent. 
(translatable) # Samples for bootblog4 (enabled) and bootstrap4 (commented) follow. # bootblog4", "(translatable) FRONT_INDEX_HEADER = { DEFAULT_LANG: '' } # URLs to other posts/pages can", "to be visible on the site # (the thumbnail has ``.thumbnail`` added before", "# conflict with running text!), just use this config: # KATEX_AUTO_RENDER = \"\"\"", "-l\" # LINK_CHECK_WHITELIST = [] # The <hN> tags in HTML generated by", "# options, but will have to be referenced manually to be visible on", "that # migration was successful. WARN_ABOUT_TAG_METADATA = False # Templates will use those", "\"from\" side MUST be a # relative URL. # # If you don't", "WARNING: if a page would conflict with the index file (usually # caused", "by default, if you want to use # another time zone, please set", "indexes? # Defaults to index.html # Common other alternatives: default.html for IIS, index.php", "global context. # subtheme selection: beige/serif/simple/sky/night/default # transition selection: cube/page/concave/linear/none/default GLOBAL_CONTEXT.update({ 'subtheme': 'simple',", "{'files': ''} # Which means copy 'files' into 'output' # One or more", "Specify an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE = '' # If True,", "\"\" # And you also need to add your COMMENT_SYSTEM_ID which # depends", "here. # Defaults are markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta is required for Markdown metadata. MARKDOWN_EXTENSIONS", "certain compilers (reST/Markdown) # will be demoted by that much (1 → h1", "run `nikola check -l`. # You may also want to use github_deploy (see", "} # POSTS and PAGES contains (wildcard, destination, template) tuples. # (translatable) #", "'long', 'medium', or 'short' # DATE_FORMAT = 'yyyy-MM-dd HH:mm' # Date format used", "used to display post dates, if local dates are used. 
(translatable) # Used", "'Contents © {date} <a href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}'", "can leave this option blank to disable comments. COMMENT_SYSTEM = \"\" # And", "a backslash (i.e. '\\//\\\\' is a path specifying the # subcategory called '\\'", "to True, the output written to output # contains only the name of", "# \"es\": \"./es\", } # What will translated input files be named like?", "a greeting. (translatable) FRONT_INDEX_HEADER = { DEFAULT_LANG: '' } # URLs to other", "available on all your templates. # It can be anything, data, functions, modules,", "BLOG_TITLE = \"My Nikola Site\" # (translatable) # This is the main URL", "Default is: # LISTINGS_FOLDERS = {'listings': 'listings'} # Which means process listings from", "set to True, the output written to output # contains only the name", "can be disabled on a per-page/post basis by adding # .. pretty_url: False", "This will be added right # before </head> # (translatable) # EXTRA_HEAD_DATA =", "index file (usually # caused by setting slug to `index`), the PAGE_INDEX #", "draft posts will not be deployed # DEPLOY_DRAFTS = True # Allows scheduling", "it will instead /foo/default.html => /foo) STRIP_INDEXES = False # List of files", "DEMOTE_HEADERS = 1 # If set to True, the tags 'draft', 'mathjax' and", "with the index file (usually # caused by setting slug to `index`), the", "dates, if local dates are used. (translatable) # Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting #", "relative # to the location of conf.py # OUTPUT_FOLDER = 'output' # where", "nikola and podcast because those are # done in the code, hope you", "now) # (translatable) # SOCIAL_BUTTONS_CODE = \"\"\" # <!-- Social buttons --> #", "'default': [ # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\", # ] # } #", "display: false} # ] # \"\"\" # What Markdown extensions to enable? 
#", "Defaults are markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta is required for Markdown metadata. MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code',", "USE_TAG_METADATA = False # If set to True, a warning is issued if", "hierarchies. For a post, the whole path in the hierarchy must be specified,", "# Instead of putting files in <slug>.html, put them in <slug>/index.html. # No", "and values are tuples. NAVIGATION_LINKS = { DEFAULT_LANG: ( (\"/archive.html\", \"Archive\"), (\"/categories/\", \"Tags\"),", "with the full templates). # The resulting files have .php extensions, making it", "that language. # Feel free to add or delete extensions to any list,", "considered as relative # to the location of conf.py # OUTPUT_FOLDER = 'output'", "index) in # list of galleries for each gallery GALLERIES_USE_THUMBNAIL = False #", "category list page. # Category pages will still be generated. HIDDEN_CATEGORIES = []", "= False # If you do not want to display an author publicly,", "generated. HIDDEN_CATEGORIES = [] # If ENABLE_AUTHOR_PAGES is set to True and there", "can mark it as hidden. # The author will not be displayed on", "(that includes the SITE_URL) # URL_TYPE = 'rel_path' # # Note that our", "will be called with template # GLOBAL_CONTEXT as parameter when the template is", "h1 will become h2 and so on) # This was a hidden feature", "or more folders containing files to be copied as-is into the output. #", "the full templates). # The resulting files have .php extensions, making it possible", "is required. Also enables STRIP_INDEXES. # This can be disabled on a per-page/post", "that displayed on “main” blog index.html files. # May be used for a", "and so on) # This was a hidden feature of the Markdown and", "and MAX_IMAGE_SIZE # options, but will have to be referenced manually to be", "# long time). Insert anything you want here, or even make it empty", "= {} # Social buttons. 
This is sample code for AddThis (which was", "GLOBAL_CONTEXT = {} # Add functions here and they will be called with", "= False # If you do not want to display a category publicly,", "= 'origin' # Whether or not github_deploy should commit to the source branch", "right now) # (translatable) # SOCIAL_BUTTONS_CODE = \"\"\" # <!-- Social buttons -->", "# If set to True, a warning is issued if one of the", "True, a warning is issued if one of the 'draft', 'mathjax' # and", "the generated pages location TRANSLATIONS = { DEFAULT_LANG: \"\", # Example for another", "= { # DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'}, # } # Date", "folders containing code listings to be processed and published on # the site.", "like. # DEPLOY_COMMANDS = { # 'default': [ # \"rsync -rav --delete output/", "use comments, you can choose between different third party comment # systems. The", "set, defaults to SITE_URL # BASE_URL = \"https://example.com/\" BLOG_EMAIL = \"<EMAIL>\" BLOG_DESCRIPTION =", "the source filename # but is disabled by default as it would conflict", "Bootblog 4: right side of title) NAVIGATION_ALT_LINKS = { DEFAULT_LANG: () } #", "navbar, Bootblog 4: right side of title) NAVIGATION_ALT_LINKS = { DEFAULT_LANG: () }", "to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE # options, but will have to be referenced manually", "already does. # DATE_FANCINESS = 0 # Customize the locale/region used for a", "input files be named like? TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' # Links for the sidebar", "URLs to other posts/pages can take 3 forms: # rel_path: a relative URL", "true}, # {left: \"\\\\\\\\[\", right: \"\\\\\\\\]\", display: true}, # {left: \"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\",", "RSS_EXTENSION = \".xml\" # RSS filename base (without extension); used for indexes and", "remote where you wish to push to, using github_deploy. 
GITHUB_REMOTE_NAME = 'origin' #", "# Settings for the (boot)Reveal theme must be added to the global context.", "= \"en\" # What other languages do you have? # The format is", "the output. # The format is a dictionary of {source: relative destination}. #", "If empty, the sidebar is not displayed. 'sidebar': '' } } # POSTS", "( (\"/archive.html\", \"Archive\"), (\"/categories/\", \"Tags\"), (\"/rss.xml\", \"RSS feed\"), ), } # Alternative navigation", "if you want to use # another time zone, please set TIMEZONE to", "a path specifying the # subcategory called '\\' of the top-level category called", "] # } # github_deploy configuration # For more details, read the manual:", "\"cache\" of partial generated content should be located # default: 'cache' # CACHE_FOLDER", "default compiler for `new_post` is the first entry in the POSTS tuple. #", "arguments # to `nikola deploy`. If no arguments are specified, a preset #", "Below this point, everything is optional # Post's dates are considered in UTC", "= False # If True, future dated posts are allowed in deployed output", "for # example. # (defaults to 1.) # DEMOTE_HEADERS = 1 # If", "displayAlign: 'center', // Change this to 'left' if you want left-aligned equations. #", "in /index.html, drop the index.html part. # http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses the", "of US English: LOCALES = {'en': 'en_GB'} # LOCALES = {} # One", "galleries for each gallery GALLERIES_USE_THUMBNAIL = False # Image to use as thumbnail", "<li><a class=\"addthis_button_google_plusone_share\"></a> # <li><a class=\"addthis_button_linkedin\"></a> # <li><a class=\"addthis_button_twitter\"></a> # </ul> # </div> #", "and not the whole path. CATEGORY_OUTPUT_FLAT_HIERARCHY = False # If you do not", "processed and published on # the site. 
The format is a dictionary of", "id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style\"> # <a class=\"addthis_button_more\">Share</a> # <ul><li><a class=\"addthis_button_facebook\"></a> #", "by \".. previewimage:\" in the gallery's index) in # list of galleries for", "= 'yyyy-MM-dd HH:mm' # Date format used to display post dates, if local", "URL for your site. It will be used # in a prominent link.", "you have? # The format is {\"translationcode\" : \"path/to/translation\" } # the path", "-*- coding: utf-8 -*- import time # !! This is the configuration of", "True # Allows scheduling of posts using the rule specified here (new_post -s)", "'sidebar': '' } } # POSTS and PAGES contains (wildcard, destination, template) tuples.", "False # KaTeX auto-render settings. If you want support for the $.$ syntax", "of social buttons --> # \"\"\" # Show link to source for the", "hidden. # The tag will not be displayed on the tag list page", "'center', // Change this to 'left' if you want left-aligned equations. # \"HTML-CSS\":", "True USE_BUNDLES = False # Plugins you don't want to use. Be careful", "= ['mathjax'] # If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized", "many presets # in a `nikola deploy` command as you like. # DEPLOY_COMMANDS", "use those filters, along with the defaults. # Consult your engine's documentation on", "= False # If True, publish future dated posts right away instead of", "# Modify the number of Post per Index Page # Defaults to 10", "an absolute path, it will be considered as relative # to the location", "dictionary of {source: relative destination}. # Default is: # LISTINGS_FOLDERS = {'listings': 'listings'}", "= \"\"\" # <!-- Social buttons --> # <div id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style", "# Create index.html for page folders? # WARNING: if a page would conflict", "# migration was successful. 
WARN_ABOUT_TAG_METADATA = False # Templates will use those filters,", "publicly, you can mark it as hidden. # The tag will not be", "be specified, # using a forward slash ('/') to separate paths. Use a", "(defined by \".. previewimage:\" in the gallery's index) in # list of galleries", "also use 'full', 'long', 'medium', or 'short' # DATE_FORMAT = 'yyyy-MM-dd HH:mm' #", "delete extensions to any list, but don't add any new # compilers unless", "not support every # feature yet, it's faster and the output looks better.", "template (base.tmpl). # (translatable) # BODY_END = \"\" # Bundle JS and CSS", "# A HTML file will be created in output/foo/from.html that redirects # to", "paths to directories containing themes to use them. # For example, the `v7`", "should be used for directory indexes? # Defaults to index.html # Common other", "{ # styles: {'.MathJax_Display': {\"margin\": 0}} # } # }); # </script> #", "side MUST be a # relative URL. # # If you don't need", "to the metadata. PRETTY_URLS = False # If True, publish future dated posts", "language. # For example, to use British instead of US English: LOCALES =", "zone, please set TIMEZONE to match. Check the available # list from Wikipedia:", "gist, nikola and podcast because those are # done in the code, hope", "(Bootstrap 4: right-side of navbar, Bootblog 4: right side of title) NAVIGATION_ALT_LINKS =", "a link ends in /index.html, drop the index.html part. # http://mysite/foo/bar/index.html => http://mysite/foo/bar/", "http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses the INDEX_FILE setting, so if that is, say,", "use this config: # MATHJAX_CONFIG = \"\"\" # <script type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({ #", "rule to all posts (not pages!) by default # SCHEDULE_ALL = False #", "color (the default ones don’t). Must be a HEX value. THEME_COLOR = '#5670d4'", "putting files in <slug>.html, put them in <slug>/index.html. # No web server configuration", "are generated. 
ENABLE_AUTHOR_PAGES = False # If you do not want to display", "# where the \"cache\" of partial generated content should be located # default:", "Category pages will still be generated. HIDDEN_CATEGORIES = [] # If ENABLE_AUTHOR_PAGES is", "is supported. Will only be effective # if SITE_URL points to server root.", "be anything, data, functions, modules, etc. GLOBAL_CONTEXT = {} # Add functions here", "# Only the individual posts are published/deployed; not in indexes/sitemap # Generally, you", "may conflict with running # text!), just use this config: # MATHJAX_CONFIG =", "# Common other alternatives: default.html for IIS, index.php # INDEX_FILE = \"index.html\" #", "[] # Add the absolute paths to directories containing themes to use them.", "# Defaults to False. # FUTURE_IS_NOW = False # If True, future dated", "May be used for a greeting. (translatable) FRONT_INDEX_HEADER = { DEFAULT_LANG: '' }", "regular expressions, links matching them will always be considered # valid by \"nikola", "= { \"rest\": ['.rst', '.txt'], \"markdown\": ['.md', '.mdown', '.markdown'], \"textile\": ['.textile'], \"txt2tags\": ['.t2t'],", "an author publicly, you can mark it as hidden. # The author will", "# DEPLOY_COMMANDS = { # 'default': [ # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\",", "in the default template (base.tmpl). # (translatable) # BODY_END = \"\" # Bundle", "= { DEFAULT_LANG: ( (), { \"email\": BLOG_EMAIL, \"author\": BLOG_AUTHOR, \"date\": time.gmtime().tm_year, \"license\":", "created in output/foo/from.html that redirects # to the \"/bar/to.html\" URL. notice that the", "], # processEscapes: true # }, # displayAlign: 'center', // Change this to", "[] # Presets of commands to execute to deploy. Can be anything, for", "Nikola: # disqus, facebook, intensedebate, isso, muut, commento, utterances # You can leave", "to all posts (not pages!) 
by default # SCHEDULE_ALL = False # Do", "Defaults to 10 # INDEX_DISPLAY_POST_COUNT = 10 # Extra things you want in", "# Use with care. # DISABLE_INDEXES = False # DISABLE_MAIN_ATOM_FEED = False #", "[ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ], # processEscapes: true # }, # displayAlign: 'center', //", "instead of MathJax? While KaTeX may not support every # feature yet, it's", "posts are published/deployed; not in indexes/sitemap # Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE", "# For example, the `plugins` directory of your clone of the Nikola plugins", "# <!-- Social buttons --> # <div id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style\">", "= \"<NAME>\" # (translatable) BLOG_TITLE = \"My Nikola Site\" # (translatable) # This", "syntax (which may # conflict with running text!), just use this config: #", "# Date fanciness. # # 0 = using DATE_FORMAT and TIMEZONE (without JS)", "files # RSS_EXTENSION = \".xml\" # RSS filename base (without extension); used for", "or run `nikola ping` from the `ping` # plugin (`nikola plugin -i ping`).", "(\"posts/*.md\", \"posts\", \"post.tmpl\"), (\"posts/*.txt\", \"posts\", \"post.tmpl\"), (\"posts/*.html\", \"posts\", \"post.tmpl\"), ) PAGES = (", "'Read more...' for the feeds, if FEED_TEASERS is True (translatable) FEED_READ_MORE_LINK = '<p><a", "future dated posts are allowed in deployed output # Only the individual posts", "in indexes/sitemap # Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same", "them as arguments # to `nikola deploy`. 
If no arguments are specified, a", "{} # Put in global_context things you want available on all your templates.", "might also use it # as an accent color (the default ones don’t).", "Allows scheduling of posts using the rule specified here (new_post -s) # Specify", "# If you do not want to display an author publicly, you can", "\"email\": BLOG_EMAIL, \"author\": BLOG_AUTHOR, \"date\": time.gmtime().tm_year, \"license\": LICENSE } ) } # A", "is a dict. The keys are languages, and values are tuples. NAVIGATION_LINKS =", "pages!) by default # SCHEDULE_ALL = False # Do you want to add", "organized in # hierarchies. For a post, the whole path in the hierarchy", "by Nikola: # disqus, facebook, intensedebate, isso, muut, commento, utterances # You can", "links. Works the same way NAVIGATION_LINKS does, # although themes may not always", "used to display post dates. (translatable) # Used by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time", "CSS into single files to make site loading faster in a HTTP/1.1 #", "source branch automatically # before deploying. GITHUB_COMMIT_SOURCE = True # Where the output", "a backup, or run `nikola ping` from the `ping` # plugin (`nikola plugin", "github_deploy. GITHUB_REMOTE_NAME = 'origin' # Whether or not github_deploy should commit to the", "posts. # Tag pages will still be generated. HIDDEN_TAGS = ['mathjax'] # If", "RSS feeds. Advanced # option used for traffic source tracking. FEED_LINKS_APPEND_QUERY = False", "source for the posts? SHOW_SOURCELINK = False # Copy the source files for", "= { DEFAULT_LANG: '' } # URLs to other posts/pages can take 3", "comment systems are supported by Nikola: # disqus, facebook, intensedebate, isso, muut, commento,", "scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE # options, but will have to", "'#5670d4' # Theme configuration. Fully theme-dependent. 
(translatable) # Samples for bootblog4 (enabled) and", "is set to True and there is more than one # author, author", "the whole path in the hierarchy must be specified, # using a forward", "License BY-NC-SA\" # style=\"border-width:0; margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A small copyright notice for", "default template (base.tmpl). # (translatable) # BODY_END = \"\" # Bundle JS and", "# This was a hidden feature of the Markdown and reST compilers in", "bootblog4 supports: featured_large featured_small featured_on_mobile # featured_large_image_on_mobile featured_strip_html sidebar # bootstrap4 supports: navbar_light", "Defaults to True. # USE_BUNDLES = True USE_BUNDLES = False # Plugins you", "tags are found in a post. Useful for checking that # migration was", "part. # http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses the INDEX_FILE setting, so if that", "Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE = '' # If True, use the scheduling", "you don't mind ;-) # Note: most Nikola-specific extensions are done via the", "diverse things that are used by the templates # ############################################################################# # 'Read more...'", "# MathJax.Hub.Config({ # tex2jax: { # inlineMath: [ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ], # displayMath:", "(\"pages/*.md\", \"\", \"page.tmpl\"), (\"pages/*.txt\", \"\", \"page.tmpl\"), (\"pages/*.html\", \"\", \"page.tmpl\"), ) # Below this", "# list from Wikipedia: TIMEZONE = \"Europe/London\" # Date format used to display", "Extension for Atom feed files # ATOM_EXTENSION = \".atom\" # A list of", "in the # past. Useful especially if your post titles are in <h1>", "can be organized in # hierarchies. For a post, the whole path in", "about to be # rendered GLOBAL_CONTEXT_FILLER = [] # Settings for the (boot)Reveal", "# in the default template (base.tmpl). 
# (translatable) # BODY_END = \"\" #", "# ] # } # github_deploy configuration # For more details, read the", "on picture gallery pages? # COMMENTS_IN_GALLERIES = False # What file should be", "-rav --delete output/ joe@my.site:/srv/www/site\" # And then do a backup, or run `nikola", "be created in output/foo/from.html that redirects # to the \"/bar/to.html\" URL. notice that", "style=\"border-width:0; margin-bottom:12px;\" # src=\"https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png\"></a>\"\"\" # A small copyright notice for the page footer", "to True, the tags 'draft', 'mathjax' and 'private' have special # meaning. If", "not be added here. # Defaults are markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta is required for", "running # text!), just use this config: # MATHJAX_CONFIG = \"\"\" # <script", "False # Enable comments on picture gallery pages? # COMMENTS_IN_GALLERIES = False #", "when the template is about to be # rendered GLOBAL_CONTEXT_FILLER = [] #", "# {left: \"\\\\\\\\(\", right: \"\\\\\\\\)\", display: false} # ] # \"\"\" # What", "# ] # \"\"\" # What Markdown extensions to enable? # You will", "feature yet, it's faster and the output looks better. # USE_KATEX = False", "handled like regular tags. USE_TAG_METADATA = False # If set to True, a", "deploy. Can be anything, for # example, you may use rsync: # \"rsync", "will be considered as relative # to the location of conf.py # OUTPUT_FOLDER", "`nikola deploy`. If no arguments are specified, a preset # named `default` will", "if you want left-aligned equations. # \"HTML-CSS\": { # styles: {'.MathJax_Display': {\"margin\": 0}}", "# What file should be used for directory indexes? # Defaults to index.html", "generated by certain compilers (reST/Markdown) # will be demoted by that much (1", "subcategory called '\\' of the top-level category called '/'). 
CATEGORY_ALLOW_HIERARCHIES = False #", "############################################################################# # Image Gallery Options # ############################################################################# # Use a thumbnail (defined by", "REQUIRES the use of 'full_path' # URL_TYPE = 'full_path' # Extension for RSS", "Use with care. # DISABLE_INDEXES = False # DISABLE_MAIN_ATOM_FEED = False # DISABLE_MAIN_RSS_FEED", "the source files for your pages? # Setting it to False implies SHOW_SOURCELINK", "files for your pages? # Setting it to False implies SHOW_SOURCELINK = False", "does, # although themes may not always support them. (translatable) # (Bootstrap 4:", "<script type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({ # tex2jax: { # inlineMath: [ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ],", "the sidebar. # (translatable) LICENSE = \"\" # I recommend using the Creative", "markdown.extensions.(fenced_code|codehilite|extra) # markdown.extensions.meta is required for Markdown metadata. MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra',", "way (i.e. with the full templates). # The resulting files have .php extensions,", "path. CATEGORY_OUTPUT_FLAT_HIERARCHY = False # If you do not want to display a", "Templates will use those filters, along with the defaults. # Consult your engine's", "# The <hN> tags in HTML generated by certain compilers (reST/Markdown) # will", "the root # absolute: a complete URL (that includes the SITE_URL) # URL_TYPE", "do a backup, or run `nikola ping` from the `ping` # plugin (`nikola", "{} (no config at all) # MARKDOWN_EXTENSION_CONFIGS = {} # Social buttons. This", "posts. # Tag pages will still be generated. HIDDEN_AUTHORS = ['Guest'] # Optional", "to be copied as-is into the output. 
# The format is a dictionary", "just use this config: # KATEX_AUTO_RENDER = \"\"\" # delimiters: [ # {left:", "inlineMath: [ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ], # displayMath: [ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"] ], # processEscapes:", "tex2jax: { # inlineMath: [ ['$','$'], [\"\\\\\\(\",\"\\\\\\)\"] ], # displayMath: [ ['$$','$$'], [\"\\\\\\[\",\"\\\\\\]\"]", "format used to display post dates. (translatable) # Used by babel.dates, CLDR style:", "as hidden. # The author will not be displayed on the author list", "in <slug>.html, put them in <slug>/index.html. # No web server configuration is required.", "the theme to use. #THEME = \"bootblog4\" THEME = \"disimplex\" # A theme", "Modify the number of Post per Index Page # Defaults to 10 #", "# POSTS = ( (\"posts/*.rst\", \"posts\", \"post.tmpl\"), (\"posts/*.md\", \"posts\", \"post.tmpl\"), (\"posts/*.txt\", \"posts\", \"post.tmpl\"),", "PRETTY_URLS = False # If True, publish future dated posts right away instead", "# \"\"\" # What Markdown extensions to enable? # You will also get", "by default # SCHEDULE_ALL = False # Do you want to add a", "containing plugins to use them. # For example, the `plugins` directory of your", "# \"rsync -rav --delete output/ joe@my.site:/srv/www/site\" # And then do a backup, or", "# <ul><li><a class=\"addthis_button_facebook\"></a> # <li><a class=\"addthis_button_google_plusone_share\"></a> # <li><a class=\"addthis_button_linkedin\"></a> # <li><a class=\"addthis_button_twitter\"></a> #", "= False # Enable comments on pages (i.e. not posts)? # COMMENTS_IN_PAGES =", "thumbnail has ``.thumbnail`` added before the file extension by default, # but a", "with the MarkdownExtension class and should not be added here. 
# Defaults are", "\"nikola check -l\" # LINK_CHECK_WHITELIST = [] # The <hN> tags in HTML", "<a href=\"mailto:{email}\">{author}</a> {license}' RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}' RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS", "reconfiguring your server to recognize them. \"php\": ['.php'], # Pandoc detects the input", "to True, a warning is issued if one of the 'draft', 'mathjax' #", "when caching is used. # Defaults to True. # USE_BUNDLES = True USE_BUNDLES", "# Default is: # FILES_FOLDERS = {'files': ''} # Which means copy 'files'", "any list, but don't add any new # compilers unless you write the", "LICENSE } ) } # A simple copyright tag for inclusion in RSS", "use rsync: # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\" # And then do a", "not be displayed on the category list page. # Category pages will still", "to use British instead of US English: LOCALES = {'en': 'en_GB'} # LOCALES", "as arguments # to `nikola deploy`. If no arguments are specified, a preset", "You will also get gist, nikola and podcast because those are # done", "on desktop. 'featured_large_image_on_mobile': True, # Strip HTML from featured post text. 'featured_strip_html': False,", "# The resulting files have .php extensions, making it possible to run #", "\"post.tmpl\"), (\"posts/*.md\", \"posts\", \"post.tmpl\"), (\"posts/*.txt\", \"posts\", \"post.tmpl\"), (\"posts/*.html\", \"posts\", \"post.tmpl\"), ) PAGES =", "valid by \"nikola check -l\" # LINK_CHECK_WHITELIST = [] # The <hN> tags", "as an accent color (the default ones don’t). Must be a HEX value.", "the individual posts are published/deployed; not in indexes/sitemap # Generally, you want FUTURE_IS_NOW", "\"---\" METADATA_FORMAT = \"YAML\" # If you do not want to display a", "rel=\"nofollow\">Nikola</a> {license}' # Things that will be passed to CONTENT_FOOTER.format(). This is done", "your pages? # Setting it to False implies SHOW_SOURCELINK = False COPY_SOURCES =", "import time # !! 
This is the configuration of Nikola. !! # #", "# A mapping of languages to file-extensions that represent that language. # Feel", "'listings' into 'output/listings' # A mapping of languages to file-extensions that represent that", "displayed on the tag list page and posts. # Tag pages will still", "partial generated content should be located # default: 'cache' # CACHE_FOLDER = 'cache'", "/foo) STRIP_INDEXES = False # List of files relative to the server root", "'.txt'], \"markdown\": ['.md', '.mdown', '.markdown'], \"textile\": ['.textile'], \"txt2tags\": ['.t2t'], \"bbcode\": ['.bb'], \"wiki\": ['.wiki'],", "# Put in global_context things you want available on all your templates. #", "= [] # Settings for the (boot)Reveal theme must be added to the", "relative URL to the current page/post (default) # full_path: a URL with the", "to directories containing themes to use them. # For example, the `v7` directory", "the absolute paths to directories containing themes to use them. # For example,", "display: true}, # {left: \"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\", display: true}, # {left: \"$\", right:", "enable? # You will also get gist, nikola and podcast because those are", "can take 3 forms: # rel_path: a relative URL to the current page/post", "a warning is issued if one of the 'draft', 'mathjax' # and 'private'", "box, with the previewimage as its background. 'featured_large': False, # Show the first", "= using a string like “2 days ago” (JS, using Luxon) # #", "1.) # DEMOTE_HEADERS = 1 # If set to True, the tags 'draft',", "# Used by Luxon: https://moment.github.io/luxon/docs/manual/formatting # Example for presets: {'preset': True, 'format': 'DATE_FULL'}", "\"rsync -rav --delete output/ joe@my.site:/srv/www/site\", # ] # } # github_deploy configuration #", "right # before </head> # (translatable) # EXTRA_HEAD_DATA = \"\" # Google Analytics", "Copy the source files for your pages? 
# Setting it to False implies", "THEME = \"disimplex\" # A theme color. In default themes, it might be", "Which means process listings from 'listings' into 'output/listings' # A mapping of languages", "# # !! You should edit it to your liking. !! # #", "list of galleries for each gallery GALLERIES_USE_THUMBNAIL = False # Image to use", "Links for the sidebar / navigation bar. (translatable) # This is a dict.", "support for the $.$ syntax (which may # conflict with running text!), just", "themes to use them. # For example, the `v7` directory of your clone", "(see below). # You can define multiple presets and specify them as arguments", "\"RSS feed\"), ), } # Alternative navigation links. Works the same way NAVIGATION_LINKS", "because those are # done in the code, hope you don't mind ;-)", "folders containing files to be copied as-is into the output. # The format", "class=\"addthis_button_google_plusone_share\"></a> # <li><a class=\"addthis_button_linkedin\"></a> # <li><a class=\"addthis_button_twitter\"></a> # </ul> # </div> # <script", "ENABLE_AUTHOR_PAGES is set to True and there is more than one # author,", "conflict with the index file (usually # caused by setting slug to `index`),", "DEPLOY_FUTURE to be the same value. # DEPLOY_FUTURE = False # If False,", "This is the main URL for your site. It will be used #", "'\\//\\\\' is a path specifying the # subcategory called '\\' of the top-level", "output/ joe@my.site:/srv/www/site\", # ] # } # github_deploy configuration # For more details,", "naming template can be configured with IMAGE_THUMBNAIL_FORMAT). IMAGE_FOLDERS = {'images': 'images'} # IMAGE_THUMBNAIL_SIZE", "<ul><li><a class=\"addthis_button_facebook\"></a> # <li><a class=\"addthis_button_google_plusone_share\"></a> # <li><a class=\"addthis_button_linkedin\"></a> # <li><a class=\"addthis_button_twitter\"></a> # </ul>", "for Markdown metadata. 
MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc'] # Options to be", "and PAGES contains (wildcard, destination, template) tuples. # (translatable) # POSTS = (", "to False) # navbar_custom_bg (defaults to '') # Config for bootblog4: THEME_CONFIG =", "site. The format is a dictionary of {source: relative destination}. # Default is:", "# One or more folders containing code listings to be processed and published", "# Enable comments on picture gallery pages? # COMMENTS_IN_GALLERIES = False # What", "{ \"email\": BLOG_EMAIL, \"author\": BLOG_AUTHOR, \"date\": time.gmtime().tm_year, \"license\": LICENSE } ) } #", "(translatable) # What is the default language? DEFAULT_LANG = \"en\" # What other", "# Name of the theme to use. #THEME = \"bootblog4\" THEME = \"disimplex\"", "new posts # \"YAML\": YAML wrapped in \"---\" METADATA_FORMAT = \"YAML\" # If", "usual way (i.e. with the full templates). # The resulting files have .php", "in a `nikola deploy` command as you like. # DEPLOY_COMMANDS = { #", "resulting files have .php extensions, making it possible to run # them without", "['.wiki'], \"ipynb\": ['.ipynb'], \"html\": ['.html', '.htm'], # PHP files are rendered the usual", "the current page/post (default) # full_path: a URL with the full path from", "from the `ping` # plugin (`nikola plugin -i ping`). Or run `nikola check", "# Allows scheduling of posts using the rule specified here (new_post -s) #", "# Plugins you don't want to use. Be careful :-) # DISABLED_PLUGINS =", "different naming template can be configured with IMAGE_THUMBNAIL_FORMAT). IMAGE_FOLDERS = {'images': 'images'} #", "you don't use an absolute path, it will be considered as relative #", "not always support them. (translatable) # (Bootstrap 4: right-side of navbar, Bootblog 4:", "# Defaults to True. # USE_BUNDLES = True USE_BUNDLES = False # Plugins", "repository. 
# EXTRA_PLUGINS_DIRS = [] # Add the absolute paths to directories containing", "example. # (defaults to 1.) # DEMOTE_HEADERS = 1 # If set to", "for the $.$ syntax (which may # conflict with running text!), just use", "# LUXON_DATE_FORMAT = { # DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'}, # }", "into 'output' # One or more folders containing code listings to be processed", "You will need to configure the deployment branch on GitHub. GITHUB_SOURCE_BRANCH = 'src'", "\"\"\" # <script type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({ # tex2jax: { # inlineMath: [ ['$','$'],", "Atom and RSS feeds. Advanced # option used for traffic source tracking. FEED_LINKS_APPEND_QUERY", "value. # DEPLOY_FUTURE = False # If False, draft posts will not be", "This is sample code for AddThis (which was the default for a #", "you may use rsync: # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\" # And then", "# author, author pages are generated. ENABLE_AUTHOR_PAGES = False # If you do", "whole path in the hierarchy must be specified, # using a forward slash", "# Alternative navigation links. Works the same way NAVIGATION_LINKS does, # although themes", "to server root. The list is used to exclude resources from # /robots.txt", "posts using the rule specified here (new_post -s) # Specify an iCal Recurrence", "GITHUB_REMOTE_NAME = 'origin' # Whether or not github_deploy should commit to the source", "METADATA_FORMAT = \"YAML\" # If you do not want to display a tag", "Add functions here and they will be called with template # GLOBAL_CONTEXT as", "containing themes to use them. # For example, the `v7` directory of your", "only on desktop. 'featured_large_image_on_mobile': True, # Strip HTML from featured post text. 'featured_strip_html':", "# !! This is the configuration of Nikola. !! # # !! 
You", "False # Modify the number of Post per Index Page # Defaults to", "code, hope you don't mind ;-) # Note: most Nikola-specific extensions are done", "`featured_small` displays them only on desktop. 'featured_large_image_on_mobile': True, # Strip HTML from featured", "you like. # DEPLOY_COMMANDS = { # 'default': [ # \"rsync -rav --delete", "rule specified here (new_post -s) # Specify an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html #", "= '{name}.thumbnail{ext}' # ############################################################################# # HTML fragments and diverse things that are used", "use # another time zone, please set TIMEZONE to match. Check the available", "conflict # with many of the others. # \"pandoc\": ['.rst', '.md', '.txt'], }", "GitHub. GITHUB_SOURCE_BRANCH = 'src' GITHUB_DEPLOY_BRANCH = 'master' # The name of the remote", "without reconfiguring your server to recognize them. \"php\": ['.php'], # Pandoc detects the", "values are tuples. NAVIGATION_LINKS = { DEFAULT_LANG: ( (\"/archive.html\", \"Archive\"), (\"/categories/\", \"Tags\"), (\"/rss.xml\",", "is {\"translationcode\" : \"path/to/translation\" } # the path will be used as a", "'rel_path' # # Note that our use of \"server side includes\" / partials", "have to be referenced manually to be visible on the site # (the", "absolute paths to directories containing themes to use them. # For example, the", "edit it to your liking. !! # # Data about this site BLOG_AUTHOR", "engines about /sitemapindex.xml. # ROBOTS_EXCLUSIONS = [\"/archive.html\", \"/category/*.html\"] # Instead of putting files", "\"/bar/to.html\")]. # # A HTML file will be created in output/foo/from.html that redirects", "hierarchy must be specified, # using a forward slash ('/') to separate paths.", "buttons. This is sample code for AddThis (which was the default for a", "# } # }); # </script> # \"\"\" # Want to use KaTeX", "# If a link ends in /index.html, drop the index.html part. 
# http://mysite/foo/bar/index.html", "will not be displayed on the category list page. # Category pages will", "# MATHJAX_CONFIG = \"\"\" # <script type=\"text/x-mathjax-config\"> # MathJax.Hub.Config({ # tex2jax: { #", "and they will be called with template # GLOBAL_CONTEXT as parameter when the", "If you want support for the $.$ syntax (which may conflict with running", "left-aligned equations. # \"HTML-CSS\": { # styles: {'.MathJax_Display': {\"margin\": 0}} # } #", "paths. Use a backslash ('\\') to escape # a forward slash or a", "# ############################################################################# # Use a thumbnail (defined by \".. previewimage:\" in the gallery's", "things you want in the pages HEAD tag. This will be added right", "# from indexing and other robotic spidering. * is supported. Will only be", "the posts? SHOW_SOURCELINK = False # Copy the source files for your pages?", "your clone of the Nikola themes # repository. # EXTRA_THEMES_DIRS = [] #", "# Post's dates are considered in UTC by default, if you want to", "False # List of files relative to the server root (!) that will", "extension by default, # but a different naming template can be configured with", "# DEPLOY_DRAFTS = True # Allows scheduling of posts using the rule specified", "plugins # repository. # EXTRA_PLUGINS_DIRS = [] # Add the absolute paths to", "# CACHE_FOLDER = 'cache' # ############################################################################# # Image Gallery Options # ############################################################################# #", "# PHP files are rendered the usual way (i.e. with the full templates).", "This can be disabled on a per-page/post basis by adding # .. pretty_url:", "the \"/bar/to.html\" URL. 
notice that the \"from\" side MUST be a # relative", "PAGES = ( (\"pages/*.rst\", \"\", \"page.tmpl\"), (\"pages/*.md\", \"\", \"page.tmpl\"), (\"pages/*.txt\", \"\", \"page.tmpl\"), (\"pages/*.html\",", "not be displayed on the tag list page and posts. # Tag pages", "the INDEX_FILE setting, so if that is, say, default.html, # it will instead", "# <a class=\"addthis_button_more\">Share</a> # <ul><li><a class=\"addthis_button_facebook\"></a> # <li><a class=\"addthis_button_google_plusone_share\"></a> # <li><a class=\"addthis_button_linkedin\"></a> #", "Bundle JS and CSS into single files to make site loading faster in", "on mobile. 'featured_on_mobile': True, # Show image in `featured_large` on mobile. # `featured_small`", "want in the pages HEAD tag. This will be added right # before", "thumbnail for those galleries that don't have one # None: show a grey", "even make it empty (which is # the default right now) # (translatable)", "A list of redirection tuples, [(\"foo/from.html\", \"/bar/to.html\")]. # # A HTML file will", "and local user time (JS, using Luxon) # 2 = using a string", "not recommended for HTTP/2.0 when caching is used. # Defaults to True. #", "a large box, with the previewimage as its background. 'featured_large': False, # Show", "is required for Markdown metadata. MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc'] # Options", "than one # author, author pages are generated. ENABLE_AUTHOR_PAGES = False # If", "want to display a tag publicly, you can mark it as hidden. #", "bootblog4: THEME_CONFIG = { DEFAULT_LANG: { # Show the latest featured post in", "a dictionary of {source: relative destination}. # Default is: # FILES_FOLDERS = {'files':", "# https://creativecommons.org/choose/ # LICENSE = \"\"\" # <a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img alt=\"Creative", "] # \"\"\" # What Markdown extensions to enable? 
# You will also", "# Specify an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE = '' # If", "['.textile'], \"txt2tags\": ['.t2t'], \"bbcode\": ['.bb'], \"wiki\": ['.wiki'], \"ipynb\": ['.ipynb'], \"html\": ['.html', '.htm'], #", "supported by Nikola: # disqus, facebook, intensedebate, isso, muut, commento, utterances # You", "use 'full', 'long', 'medium', or 'short' # DATE_FORMAT = 'yyyy-MM-dd HH:mm' # Date", "If set to True, the tags 'draft', 'mathjax' and 'private' have special #", "\"author\": BLOG_AUTHOR, \"date\": time.gmtime().tm_year, \"license\": LICENSE } ) } # A simple copyright", "a prominent link. Don't forget the protocol (http/https)! SITE_URL = \"https://example.com/\" # This", "Bootstrap already does. # DATE_FANCINESS = 0 # Customize the locale/region used for", "\"path/to/translation\" } # the path will be used as a prefix for the", "path will be used as a prefix for the generated pages location TRANSLATIONS", "# POSTS and PAGES contains (wildcard, destination, template) tuples. # (translatable) # POSTS", "page and posts. # Tag pages will still be generated. HIDDEN_AUTHORS = ['Guest']", "False, draft posts will not be deployed # DEPLOY_DRAFTS = True # Allows", "the Nikola themes # repository. # EXTRA_THEMES_DIRS = [] # List of regular", "it as hidden. # The author will not be displayed on the author", "{source: relative destination}. # Default is: # LISTINGS_FOLDERS = {'listings': 'listings'} # Which", "files # ATOM_EXTENSION = \".atom\" # A list of redirection tuples, [(\"foo/from.html\", \"/bar/to.html\")].", "\"YAML\": YAML wrapped in \"---\" METADATA_FORMAT = \"YAML\" # If you do not", "# # The default compiler for `new_post` is the first entry in the", "links matching them will always be considered # valid by \"nikola check -l\"", "tag. 
This will be added right # before </head> # (translatable) # EXTRA_HEAD_DATA", "# If ENABLE_AUTHOR_PAGES is set to True and there is more than one", "bootblog4 (enabled) and bootstrap4 (commented) follow. # bootblog4 supports: featured_large featured_small featured_on_mobile #", "Defaults to index.html # Common other alternatives: default.html for IIS, index.php # INDEX_FILE", "class=\"addthis_button_linkedin\"></a> # <li><a class=\"addthis_button_twitter\"></a> # </ul> # </div> # <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!--", "comment # systems. The following comment systems are supported by Nikola: # disqus,", "full templates). # The resulting files have .php extensions, making it possible to", "(commented) follow. # bootblog4 supports: featured_large featured_small featured_on_mobile # featured_large_image_on_mobile featured_strip_html sidebar #", "to the \"/bar/to.html\" URL. notice that the \"from\" side MUST be a #", "config file? # MATHJAX_CONFIG = \"\" # If you want support for the", "The <hN> tags in HTML generated by certain compilers (reST/Markdown) # will be", "# 'rest' is reStructuredText # 'markdown' is Markdown # 'html' assumes the file", "# .. pretty_url: False # to the metadata. PRETTY_URLS = False # If", "dict. The keys are languages, and values are tuples. NAVIGATION_LINKS = { DEFAULT_LANG:", "# and 'private' tags are found in a post. Useful for checking that", "# navbar_custom_bg (defaults to '') # Config for bootblog4: THEME_CONFIG = { DEFAULT_LANG:", "} # A simple copyright tag for inclusion in RSS feeds that works", "used for a greeting. (translatable) FRONT_INDEX_HEADER = { DEFAULT_LANG: '' } # URLs", "'') # Config for bootblog4: THEME_CONFIG = { DEFAULT_LANG: { # Show the", "for AddThis (which was the default for a # long time). 
Insert anything", "# 'html' assumes the file is HTML and just copies it COMPILERS =", "= ( (\"pages/*.rst\", \"\", \"page.tmpl\"), (\"pages/*.md\", \"\", \"page.tmpl\"), (\"pages/*.txt\", \"\", \"page.tmpl\"), (\"pages/*.html\", \"\",", "in `featured_large` on mobile. # `featured_small` displays them only on desktop. 'featured_large_image_on_mobile': True,", "options, but will have to be referenced manually to be visible on the", "To use comments, you can choose between different third party comment # systems.", "using a forward slash ('/') to separate paths. Use a backslash ('\\') to", "False # Enable comments on pages (i.e. not posts)? # COMMENTS_IN_PAGES = False", "Name of the theme to use. #THEME = \"bootblog4\" THEME = \"disimplex\" #", "Luxon) # 2 = using a string like “2 days ago” (JS, using", "= False # DISABLE_MAIN_RSS_FEED = False # Add the absolute paths to directories", "Want to use KaTeX instead of MathJax? While KaTeX may not support every", "it # as an accent color (the default ones don’t). Must be a", "especially if your post titles are in <h1> tags too, for # example.", "future dated posts right away instead of scheduling them. # Defaults to False.", "# example, you may use rsync: # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\" #", "presets # in a `nikola deploy` command as you like. # DEPLOY_COMMANDS =", "also need to add your COMMENT_SYSTEM_ID which # depends on what comment system", "empty, the sidebar is not displayed. 'sidebar': '' } } # POSTS and", "the # past. Useful especially if your post titles are in <h1> tags", "the location of conf.py # OUTPUT_FOLDER = 'output' # where the \"cache\" of", "considered # valid by \"nikola check -l\" # LINK_CHECK_WHITELIST = [] # The", "or 'short' # DATE_FORMAT = 'yyyy-MM-dd HH:mm' # Date format used to display", "(wildcard, destination, template) tuples. 
# (translatable) # POSTS = ( (\"posts/*.rst\", \"posts\", \"post.tmpl\"),", "as a prefix for the generated pages location TRANSLATIONS = { DEFAULT_LANG: \"\",", "= { DEFAULT_LANG: () } # Name of the theme to use. #THEME", "\"page.tmpl\"), (\"pages/*.html\", \"\", \"page.tmpl\"), ) # Below this point, everything is optional #", "list is used to exclude resources from # /robots.txt and /sitemap.xml, and to", "# repository. # EXTRA_PLUGINS_DIRS = [] # Add the absolute paths to directories", "the output looks better. # USE_KATEX = False # KaTeX auto-render settings. If", "({min_remaining_read})</p>' # Append a URL query to the FEED_READ_MORE_LINK in Atom and RSS", "be demoted by that much (1 → h1 will become h2 and so", "= ['Guest'] # Optional HTML that displayed on “main” blog index.html files. #", "of navbar, Bootblog 4: right side of title) NAVIGATION_ALT_LINKS = { DEFAULT_LANG: ()", "is about to be # rendered GLOBAL_CONTEXT_FILLER = [] # Settings for the", "not set, defaults to SITE_URL # BASE_URL = \"https://example.com/\" BLOG_EMAIL = \"<EMAIL>\" BLOG_DESCRIPTION", "time.gmtime().tm_year, \"license\": LICENSE } ) } # A simple copyright tag for inclusion", "the SITE_URL) # URL_TYPE = 'rel_path' # # Note that our use of", "If a link ends in /index.html, drop the index.html part. # http://mysite/foo/bar/index.html =>", "Enable comments on pages (i.e. not posts)? # COMMENTS_IN_PAGES = False # Enable", "of your clone of the Nikola themes # repository. # EXTRA_THEMES_DIRS = []", "source filename # but is disabled by default as it would conflict #", "small boxes. 'featured_small': False, # Show featured posts on mobile. 'featured_on_mobile': True, #", "Feel free to add or delete extensions to any list, but don't add", "False # Plugins you don't want to use. Be careful :-) # DISABLED_PLUGINS", "link to source for the posts? SHOW_SOURCELINK = False # Copy the source", "to configure the deployment branch on GitHub. 
GITHUB_SOURCE_BRANCH = 'src' GITHUB_DEPLOY_BRANCH = 'master'", "DEFAULT_LANG: ( (), { \"email\": BLOG_EMAIL, \"author\": BLOG_AUTHOR, \"date\": time.gmtime().tm_year, \"license\": LICENSE }", "of \"server side includes\" / partials # REQUIRES the use of 'full_path' #", "FUTURE_IS_NOW = False # If True, future dated posts are allowed in deployed", "BASE_URL = \"https://example.com/\" BLOG_EMAIL = \"<EMAIL>\" BLOG_DESCRIPTION = \"This is a demo site", "to other posts/pages can take 3 forms: # rel_path: a relative URL to", "in that url GALLERIES_DEFAULT_THUMBNAIL = None # Images will be scaled down according", "'format': 'DATE_FULL'} # LUXON_DATE_FORMAT = { # DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'},", "{} # Social buttons. This is sample code for AddThis (which was the", "} # URLs to other posts/pages can take 3 forms: # rel_path: a", "your templates. # It can be anything, data, functions, modules, etc. GLOBAL_CONTEXT =", "the \"cache\" of partial generated content should be located # default: 'cache' #", "them. \"php\": ['.php'], # Pandoc detects the input from the source filename #", "A simple copyright tag for inclusion in RSS feeds that works just #", "need any of these, just set to [] REDIRECTIONS = [] # Presets", "# '/url/to/file': show the image in that url GALLERIES_DEFAULT_THUMBNAIL = None # Images", "featured_large_image_on_mobile featured_strip_html sidebar # bootstrap4 supports: navbar_light (defaults to False) # navbar_custom_bg (defaults", "a string like “2 days ago” (JS, using Luxon) # # Your theme", "will translated input files be named like? TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' # Links for", "of the top-level category called '/'). CATEGORY_ALLOW_HIERARCHIES = False # If CATEGORY_OUTPUT_FLAT_HIERARCHY is", "plugin -i ping`). Or run `nikola check -l`. # You may also want", "If set to False, these tags are handled like regular tags. USE_TAG_METADATA =", "Samples for bootblog4 (enabled) and bootstrap4 (commented) follow. 
# bootblog4 supports: featured_large featured_small", "you don't need any of these, just set to [] REDIRECTIONS = []", "indexes. # (translatable) ATOM_FILENAME_BASE = \"feed\" # Extension for Atom feed files #", "= False # KaTeX auto-render settings. If you want support for the $.$", "example, the `v7` directory of your clone of the Nikola themes # repository.", "\"Tags\"), (\"/rss.xml\", \"RSS feed\"), ), } # Alternative navigation links. Works the same", "command as you like. # DEPLOY_COMMANDS = { # 'default': [ # \"rsync", "# # A HTML file will be created in output/foo/from.html that redirects #", "# IMAGE_THUMBNAIL_FORMAT = '{name}.thumbnail{ext}' # ############################################################################# # HTML fragments and diverse things that", "TRANSLATIONS = { DEFAULT_LANG: \"\", # Example for another language: # \"es\": \"./es\",", "don't mind ;-) # Note: most Nikola-specific extensions are done via the Nikola", "# Presets of commands to execute to deploy. Can be anything, for #", "= \"bootblog4\" THEME = \"disimplex\" # A theme color. In default themes, it", "True (translatable) FEED_READ_MORE_LINK = '<p><a href=\"{link}\">{read_more}…</a> ({min_remaining_read})</p>' # Append a URL query to", "<a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}' # Things that will be passed to CONTENT_FOOTER.format(). This", "show the image in that url GALLERIES_DEFAULT_THUMBNAIL = None # Images will be", "leaf category and not the whole path. CATEGORY_OUTPUT_FLAT_HIERARCHY = False # If you", "'src' GITHUB_DEPLOY_BRANCH = 'master' # The name of the remote where you wish", "https://creativecommons.org/choose/ # LICENSE = \"\"\" # <a rel=\"license\" href=\"https://creativecommons.org/licenses/by-nc-sa/4.0/\"> # <img alt=\"Creative Commons", "# Config for bootblog4: THEME_CONFIG = { DEFAULT_LANG: { # Show the latest", "copyright notice for the page footer (in HTML). 
# (translatable) CONTENT_FOOTER = 'Contents", "'featured_on_mobile': True, # Show image in `featured_large` on mobile. # `featured_small` displays them", "THEME_CONFIG = { DEFAULT_LANG: { # Show the latest featured post in a", "was successful. WARN_ABOUT_TAG_METADATA = False # Templates will use those filters, along with", "value. THEME_COLOR = '#5670d4' # Theme configuration. Fully theme-dependent. (translatable) # Samples for", "else you use. Added to the bottom of <body> # in the default", "to your liking. !! # # Data about this site BLOG_AUTHOR = \"<NAME>\"", "in RSS feeds that works just # like CONTENT_FOOTER and CONTENT_FOOTER_FORMATS RSS_COPYRIGHT =", "from # /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml. #", "to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced # option used for", "['.bb'], \"wiki\": ['.wiki'], \"ipynb\": ['.ipynb'], \"html\": ['.html', '.htm'], # PHP files are rendered", "to deploy. Can be anything, for # example, you may use rsync: #", "# (translatable) ATOM_FILENAME_BASE = \"feed\" # Extension for Atom feed files # ATOM_EXTENSION", "files relative to the server root (!) that will be asked to be", "# past. Useful especially if your post titles are in <h1> tags too,", "show a grey square # '/url/to/file': show the image in that url GALLERIES_DEFAULT_THUMBNAIL", "= False # Modify the number of Post per Index Page # Defaults", "Date fanciness. # # 0 = using DATE_FORMAT and TIMEZONE (without JS) #", "is a dictionary of {source: relative destination}. # Default is: # LISTINGS_FOLDERS =", "\"/bar/to.html\" URL. notice that the \"from\" side MUST be a # relative URL.", "and posts. # Tag pages will still be generated. 
HIDDEN_TAGS = ['mathjax'] #", "joe@my.site:/srv/www/site\" # And then do a backup, or run `nikola ping` from the", "# ############################################################################# # Image Gallery Options # ############################################################################# # Use a thumbnail (defined", "= '' # If True, use the scheduling rule to all posts (not", "True and there is more than one # author, author pages are generated.", "to True. # USE_BUNDLES = True USE_BUNDLES = False # Plugins you don't", "Date format used to display post dates, if local dates are used. (translatable)", "# PAGE_INDEX = False # Enable comments on pages (i.e. not posts)? #", "# \"\"\" # Show link to source for the posts? SHOW_SOURCELINK = False", "content should be located # default: 'cache' # CACHE_FOLDER = 'cache' # #############################################################################", "plugin (`nikola plugin -i ping`). Or run `nikola check -l`. # You may", "of Nikola. !! # # !! You should edit it to your liking.", "# Date format used to display post dates. (translatable) # Used by babel.dates,", "# \"HTML-CSS\": { # styles: {'.MathJax_Display': {\"margin\": 0}} # } # }); #", "tags 'draft', 'mathjax' and 'private' have special # meaning. If set to False,", "if your post titles are in <h1> tags too, for # example. #", "language: # \"es\": \"./es\", } # What will translated input files be named", "author publicly, you can mark it as hidden. # The author will not", "sidebar # bootstrap4 supports: navbar_light (defaults to False) # navbar_custom_bg (defaults to '')", "arguments are specified, a preset # named `default` will be executed. You can", "in output/foo/from.html that redirects # to the \"/bar/to.html\" URL. notice that the \"from\"", "A theme color. 
In default themes, it might be displayed by some browsers", "MATHJAX_CONFIG = \"\" # If you want support for the $.$ syntax (which", "</ul> # </div> # <script src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!-- End of social buttons -->", "# Tag pages will still be generated. HIDDEN_TAGS = ['mathjax'] # If CATEGORY_ALLOW_HIERARCHIES", "{license}' RSS_COPYRIGHT_PLAIN = 'Contents © {date} {author} {license}' RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS # To", "of the sidebar, If empty, the sidebar is not displayed. 'sidebar': '' }", "} # }); # </script> # \"\"\" # Want to use KaTeX instead", "containing files to be copied as-is into the output. # The format is", "are allowed in deployed output # Only the individual posts are published/deployed; not", "processEscapes: true # }, # displayAlign: 'center', // Change this to 'left' if", "individual posts are published/deployed; not in indexes/sitemap # Generally, you want FUTURE_IS_NOW and", "for indexes. # (translatable) ATOM_FILENAME_BASE = \"feed\" # Extension for Atom feed files", "the manual: # https://getnikola.com/handbook.html#deploying-to-github # You will need to configure the deployment branch", "gallery GALLERIES_USE_THUMBNAIL = False # Image to use as thumbnail for those galleries", "add your COMMENT_SYSTEM_ID which # depends on what comment system you use. The", "Insert anything you want here, or even make it empty (which is #", "# {left: \"$$\", right: \"$$\", display: true}, # {left: \"\\\\\\\\[\", right: \"\\\\\\\\]\", display:", "engine's documentation on filters if you need help defining # those. # TEMPLATE_FILTERS", "be passed to markdown extensions (See https://python-markdown.github.io/reference/) # Default is {} (no config", "\"page.tmpl\"), (\"pages/*.txt\", \"\", \"page.tmpl\"), (\"pages/*.html\", \"\", \"page.tmpl\"), ) # Below this point, everything", "the site. The format is a dictionary of {source: relative destination}. 
# Default", "complete URL (that includes the SITE_URL) # URL_TYPE = 'rel_path' # # Note", "use github_deploy (see below). # You can define multiple presets and specify them", "CACHE_FOLDER = 'cache' # ############################################################################# # Image Gallery Options # ############################################################################# # Use", "default is # \"nikolademo\" which is a test account for Disqus. More information", "IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE # options, but will have to be referenced manually to", "<!-- Social buttons --> # <div id=\"addthisbox\" class=\"addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style\"> #", "is more than one # author, author pages are generated. ENABLE_AUTHOR_PAGES = False", "but is not recommended for HTTP/2.0 when caching is used. # Defaults to", "the Markdown and reST compilers in the # past. Useful especially if your", "are # done in the code, hope you don't mind ;-) # Note:", "Markdown metadata. MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc'] # Options to be passed", "into single files to make site loading faster in a HTTP/1.1 # environment", "be called with template # GLOBAL_CONTEXT as parameter when the template is about", "defaults. # Consult your engine's documentation on filters if you need help defining", "# None: show a grey square # '/url/to/file': show the image in that", "\"ipynb\": ['.ipynb'], \"html\": ['.html', '.htm'], # PHP files are rendered the usual way", "YAML wrapped in \"---\" METADATA_FORMAT = \"YAML\" # If you do not want", "href=\"{link}\">{read_more}…</a></p>' # 'Read more...' for the feeds, if FEED_TEASERS is True (translatable) FEED_READ_MORE_LINK", "and other robotic spidering. * is supported. 
Will only be effective # if", "them will always be considered # valid by \"nikola check -l\" # LINK_CHECK_WHITELIST", "= [] # Presets of commands to execute to deploy. Can be anything,", "time). Insert anything you want here, or even make it empty (which is", "setting slug to `index`), the PAGE_INDEX # will not be generated for that", "you do not want to display an author publicly, you can mark it", "for your pages? # Setting it to False implies SHOW_SOURCELINK = False COPY_SOURCES", "theme must be added to the global context. # subtheme selection: beige/serif/simple/sky/night/default #", "as hidden. # The category will not be displayed on the category list", "the server root (!) that will be asked to be excluded # from", "GITHUB_COMMIT_SOURCE = True # Where the output site should be located # If", "Settings for the (boot)Reveal theme must be added to the global context. #", "babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time # You can also use 'full', 'long', 'medium', or", "LOCALES = {'en': 'en_GB'} # LOCALES = {} # One or more folders", "forward slash or a backslash (i.e. '\\//\\\\' is a path specifying the #", "using the Creative Commons' wizard: # https://creativecommons.org/choose/ # LICENSE = \"\"\" # <a", "STRIP_INDEXES. # This can be disabled on a per-page/post basis by adding #", "Only the individual posts are published/deployed; not in indexes/sitemap # Generally, you want", "available # list from Wikipedia: TIMEZONE = \"Europe/London\" # Date format used to", "= \"Europe/London\" # Date format used to display post dates. (translatable) # Used", "'yyyy-MM-dd HH:mm' # Date format used to display post dates, if local dates", "(translatable) # POSTS = ( (\"posts/*.rst\", \"posts\", \"post.tmpl\"), (\"posts/*.md\", \"posts\", \"post.tmpl\"), (\"posts/*.txt\", \"posts\",", "class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' # 'Read more...' for the feeds, if FEED_TEASERS is True (translatable)", ".. 
pretty_url: False # to the metadata. PRETTY_URLS = False # If True,", "Instead of putting files in <slug>.html, put them in <slug>/index.html. # No web", "CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output # contains only", "FEED_LINKS_APPEND_QUERY = False # A HTML fragment describing the license, for the sidebar.", "a page would conflict with the index file (usually # caused by setting", "This is done CONTENT_FOOTER_FORMATS = { DEFAULT_LANG: ( (), { \"email\": BLOG_EMAIL, \"author\":", "(), { \"email\": BLOG_EMAIL, \"author\": BLOG_AUTHOR, \"date\": time.gmtime().tm_year, \"license\": LICENSE } ) }", "= { # 'default': [ # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\", # ]", "default.html for IIS, index.php # INDEX_FILE = \"index.html\" # If a link ends", "listings from 'listings' into 'output/listings' # A mapping of languages to file-extensions that", "along with the defaults. # Consult your engine's documentation on filters if you", "are considered in UTC by default, if you want to use # another", "by default as it would conflict # with many of the others. #", "they will be called with template # GLOBAL_CONTEXT as parameter when the template", "'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc'] # Options to be passed to markdown extensions (See https://python-markdown.github.io/reference/)", "called '\\' of the top-level category called '/'). CATEGORY_ALLOW_HIERARCHIES = False # If", "text!), just use this config: # KATEX_AUTO_RENDER = \"\"\" # delimiters: [ #", "be generated. HIDDEN_CATEGORIES = [] # If ENABLE_AUTHOR_PAGES is set to True and", "# (translatable) LICENSE = \"\" # I recommend using the Creative Commons' wizard:", "# A small copyright notice for the page footer (in HTML). 
# (translatable)", "index.php # INDEX_FILE = \"index.html\" # If a link ends in /index.html, drop", "by babel.dates, CLDR style: http://cldr.unicode.org/translation/date-time-1/date-time # You can also use 'full', 'long', 'medium',", "you want available on all your templates. # It can be anything, data,", "DISABLED_PLUGINS = [\"render_galleries\"] # Special settings to disable only parts of the indexes", "# 'markdown' is Markdown # 'html' assumes the file is HTML and just", "user time (JS, using Luxon) # 2 = using a string like “2", "= 'full_path' # Extension for RSS feed files # RSS_EXTENSION = \".xml\" #", "using the rule specified here (new_post -s) # Specify an iCal Recurrence Rule:", "False # If False, draft posts will not be deployed # DEPLOY_DRAFTS =", "\"\\\\\\\\]\", display: true}, # {left: \"\\\\\\\\begin{equation*}\", right: \"\\\\\\\\end{equation*}\", display: true}, # {left: \"$\",", "https://moment.github.io/luxon/docs/manual/formatting # Example for presets: {'preset': True, 'format': 'DATE_FULL'} # LUXON_DATE_FORMAT = {", "for the generated pages location TRANSLATIONS = { DEFAULT_LANG: \"\", # Example for", "Social buttons. This is sample code for AddThis (which was the default for", "things that are used by the templates # ############################################################################# # 'Read more...' for", "the default right now) # (translatable) # SOCIAL_BUTTONS_CODE = \"\"\" # <!-- Social", "UI color (eg. Chrome on Android). Other themes might also use it #", "make it empty (which is # the default right now) # (translatable) #", "\"txt2tags\": ['.t2t'], \"bbcode\": ['.bb'], \"wiki\": ['.wiki'], \"ipynb\": ['.ipynb'], \"html\": ['.html', '.htm'], # PHP", "* is supported. Will only be effective # if SITE_URL points to server", "} # the path will be used as a prefix for the generated", "warning is issued if one of the 'draft', 'mathjax' # and 'private' tags", "of <body> # in the default template (base.tmpl). 
# (translatable) # BODY_END =", "default themes, it might be displayed by some browsers as # the browser", "by Luxon: https://moment.github.io/luxon/docs/manual/formatting # Example for presets: {'preset': True, 'format': 'DATE_FULL'} # LUXON_DATE_FORMAT", "for Disqus. More information # is in the manual. COMMENT_SYSTEM_ID = \"\" #", "= \"https://example.com/\" # This is the URL where Nikola's output will be deployed.", "You should edit it to your liking. !! # # Data about this", "Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE # options, but", "(boot)Reveal theme must be added to the global context. # subtheme selection: beige/serif/simple/sky/night/default", "displayed by some browsers as # the browser UI color (eg. Chrome on", "with the defaults. # Consult your engine's documentation on filters if you need", "= '{path}.{lang}.{ext}' # Links for the sidebar / navigation bar. (translatable) # This", "category and not the whole path. CATEGORY_OUTPUT_FLAT_HIERARCHY = False # If you do", "for the page footer (in HTML). # (translatable) CONTENT_FOOTER = 'Contents &copy; {date}", "default for a # long time). Insert anything you want here, or even", "example, the `plugins` directory of your clone of the Nikola plugins # repository.", "0 # Customize the locale/region used for a language. # For example, to", "special # meaning. If set to False, these tags are handled like regular", "isso, muut, commento, utterances # You can leave this option blank to disable", "Preferred metadata format for new posts # \"YAML\": YAML wrapped in \"---\" METADATA_FORMAT", "also use it # as an accent color (the default ones don’t). Must", "Show featured posts on mobile. 'featured_on_mobile': True, # Show image in `featured_large` on", "rsync: # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\" # And then do a backup,", "of MathJax? While KaTeX may not support every # feature yet, it's faster", "[(\"foo/from.html\", \"/bar/to.html\")]. 
# # A HTML file will be created in output/foo/from.html that", "passed to CONTENT_FOOTER.format(). This is done CONTENT_FOOTER_FORMATS = { DEFAULT_LANG: ( (), {", "`plugins` directory of your clone of the Nikola plugins # repository. # EXTRA_PLUGINS_DIRS", "and reST compilers in the # past. Useful especially if your post titles", "tuples. NAVIGATION_LINKS = { DEFAULT_LANG: ( (\"/archive.html\", \"Archive\"), (\"/categories/\", \"Tags\"), (\"/rss.xml\", \"RSS feed\"),", "src=\"https://s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798\"></script> # <!-- End of social buttons --> # \"\"\" # Show link", "config: # KATEX_AUTO_RENDER = \"\"\" # delimiters: [ # {left: \"$$\", right: \"$$\",", "markdown.extensions.meta is required for Markdown metadata. MARKDOWN_EXTENSIONS = ['markdown.extensions.fenced_code', 'markdown.extensions.codehilite', 'markdown.extensions.extra', 'markdown.extensions.toc'] #", "format is a dictionary of {source: relative destination}. # Default is: # LISTINGS_FOLDERS", "{ # 'default': [ # \"rsync -rav --delete output/ joe@my.site:/srv/www/site\", # ] #", "a grey square # '/url/to/file': show the image in that url GALLERIES_DEFAULT_THUMBNAIL =", "Useful especially if your post titles are in <h1> tags too, for #", "things you want available on all your templates. # It can be anything,", "False # Templates will use those filters, along with the defaults. # Consult", "'DATE_FULL'} # LUXON_DATE_FORMAT = { # DEFAULT_LANG: {'preset': False, 'format': 'yyyy-MM-dd HH:mm'}, #", "(\"pages/*.rst\", \"\", \"page.tmpl\"), (\"pages/*.md\", \"\", \"page.tmpl\"), (\"pages/*.txt\", \"\", \"page.tmpl\"), (\"pages/*.html\", \"\", \"page.tmpl\"), )", "to add a Mathjax config file? # MATHJAX_CONFIG = \"\" # If you", "While KaTeX may not support every # feature yet, it's faster and the", "default as it would conflict # with many of the others. # \"pandoc\":", "# Things that will be passed to CONTENT_FOOTER.format(). 
This is done CONTENT_FOOTER_FORMATS =", "sample code for AddThis (which was the default for a # long time).", "output will be deployed. # If not set, defaults to SITE_URL # BASE_URL", "# If you want support for the $.$ syntax (which may conflict with", "'left' if you want left-aligned equations. # \"HTML-CSS\": { # styles: {'.MathJax_Display': {\"margin\":", "# RSS_EXTENSION = \".xml\" # RSS filename base (without extension); used for indexes", "supports: navbar_light (defaults to False) # navbar_custom_bg (defaults to '') # Config for", "parameter when the template is about to be # rendered GLOBAL_CONTEXT_FILLER = []", "LINK_CHECK_WHITELIST = [] # The <hN> tags in HTML generated by certain compilers", "# Strip HTML from featured post text. 'featured_strip_html': False, # Contents of the", "define multiple presets and specify them as arguments # to `nikola deploy`. If", "relative destination}. # Default is: # FILES_FOLDERS = {'files': ''} # Which means", "run `nikola ping` from the `ping` # plugin (`nikola plugin -i ping`). Or", "# If set to True, the tags 'draft', 'mathjax' and 'private' have special", "ROBOTS_EXCLUSIONS = [\"/archive.html\", \"/category/*.html\"] # Instead of putting files in <slug>.html, put them", "# example. # (defaults to 1.) # DEMOTE_HEADERS = 1 # If set", "The tag will not be displayed on the tag list page and posts.", "of regular expressions, links matching them will always be considered # valid by", "metadata format for new posts # \"YAML\": YAML wrapped in \"---\" METADATA_FORMAT =", "on the category list page. # Category pages will still be generated. HIDDEN_CATEGORIES", "(!) that will be asked to be excluded # from indexing and other", "you want to add a Mathjax config file? # MATHJAX_CONFIG = \"\" #", "to the global context. # subtheme selection: beige/serif/simple/sky/night/default # transition selection: cube/page/concave/linear/none/default GLOBAL_CONTEXT.update({", "Alternative navigation links. 
Works the same way NAVIGATION_LINKS does, # although themes may", "SITE_URL points to server root. The list is used to exclude resources from", "https://python-markdown.github.io/reference/) # Default is {} (no config at all) # MARKDOWN_EXTENSION_CONFIGS = {}", "extensions, making it possible to run # them without reconfiguring your server to", "more than one # author, author pages are generated. ENABLE_AUTHOR_PAGES = False #", "is set to True, categories can be organized in # hierarchies. For a", "used for directory indexes? # Defaults to index.html # Common other alternatives: default.html", "# option used for traffic source tracking. FEED_LINKS_APPEND_QUERY = False # A HTML", "Presets of commands to execute to deploy. Can be anything, for # example,", "The resulting files have .php extensions, making it possible to run # them", "loading faster in a HTTP/1.1 # environment but is not recommended for HTTP/2.0", "be used # in a prominent link. Don't forget the protocol (http/https)! SITE_URL", "{license}' RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS # To use comments, you can choose between different", "using a string like “2 days ago” (JS, using Luxon) # # Your", "(translatable) BLOG_TITLE = \"My Nikola Site\" # (translatable) # This is the main", "# Add functions here and they will be called with template # GLOBAL_CONTEXT", "be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE # options, but will have", "} # Name of the theme to use. #THEME = \"bootblog4\" THEME =", "True, # Show image in `featured_large` on mobile. # `featured_small` displays them only", "# } # Date fanciness. # # 0 = using DATE_FORMAT and TIMEZONE", "but a different naming template can be configured with IMAGE_THUMBNAIL_FORMAT). IMAGE_FOLDERS = {'images':", "templates). # The resulting files have .php extensions, making it possible to run", "another language: # \"es\": \"./es\", } # What will translated input files be", "third party comment # systems. 
The following comment systems are supported by Nikola:", "\"feed\" # Extension for Atom feed files # ATOM_EXTENSION = \".atom\" # A", "the index file (usually # caused by setting slug to `index`), the PAGE_INDEX", "= \"\" # If you want support for the $.$ syntax (which may", "other posts/pages can take 3 forms: # rel_path: a relative URL to the", "is disabled by default as it would conflict # with many of the", "location TRANSLATIONS = { DEFAULT_LANG: \"\", # Example for another language: # \"es\":", "# SOCIAL_BUTTONS_CODE = \"\"\" # <!-- Social buttons --> # <div id=\"addthisbox\" class=\"addthis_toolbox", "BODY_END = \"\" # Bundle JS and CSS into single files to make", "drop the index.html part. # http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses the INDEX_FILE setting,", "= \"My Nikola Site\" # (translatable) # This is the main URL for", "(translatable) # EXTRA_HEAD_DATA = \"\" # Google Analytics or whatever else you use.", "# using a forward slash ('/') to separate paths. Use a backslash ('\\')", "context. # subtheme selection: beige/serif/simple/sky/night/default # transition selection: cube/page/concave/linear/none/default GLOBAL_CONTEXT.update({ 'subtheme': 'simple', 'transition':", "subtheme selection: beige/serif/simple/sky/night/default # transition selection: cube/page/concave/linear/none/default GLOBAL_CONTEXT.update({ 'subtheme': 'simple', 'transition': 'none' })", "KaTeX auto-render settings. If you want support for the $.$ syntax (which may", "be displayed on the category list page. # Category pages will still be", "new # compilers unless you write the interface for it yourself. # #", "\"markdown\": ['.md', '.mdown', '.markdown'], \"textile\": ['.textile'], \"txt2tags\": ['.t2t'], \"bbcode\": ['.bb'], \"wiki\": ['.wiki'], \"ipynb\":", "the same value. # DEPLOY_FUTURE = False # If False, draft posts will", "want to add a Mathjax config file? 
# MATHJAX_CONFIG = \"\" # If", "{ # Show the latest featured post in a large box, with the", "the use of 'full_path' # URL_TYPE = 'full_path' # Extension for RSS feed", "browsers as # the browser UI color (eg. Chrome on Android). Other themes", "featured post text. 'featured_strip_html': False, # Contents of the sidebar, If empty, the", "'<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' # 'Read more...' for the feeds, if FEED_TEASERS is True", "Markdown extensions to enable? # You will also get gist, nikola and podcast", "is the main URL for your site. It will be used # in", "# them without reconfiguring your server to recognize them. \"php\": ['.php'], # Pandoc", "means process listings from 'listings' into 'output/listings' # A mapping of languages to", "more details, read the manual: # https://getnikola.com/handbook.html#deploying-to-github # You will need to configure", "The keys are languages, and values are tuples. NAVIGATION_LINKS = { DEFAULT_LANG: (", "is a test account for Disqus. More information # is in the manual.", "scheduling rule to all posts (not pages!) by default # SCHEDULE_ALL = False", "# KaTeX auto-render settings. If you want support for the $.$ syntax (which", "'full_path' # URL_TYPE = 'full_path' # Extension for RSS feed files # RSS_EXTENSION", "= 'Contents &copy; {date} <a href=\"mailto:{email}\">{author}</a> - Powered by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}'", "(`nikola plugin -i ping`). Or run `nikola check -l`. # You may also", "not be generated for that directory. # PAGE_INDEX = False # Enable comments", "If you don't need any of these, just set to [] REDIRECTIONS =", "\"This is a demo site for Nikola.\" # (translatable) # What is the", "(defaults to 1.) # DEMOTE_HEADERS = 1 # If set to True, the", "# another time zone, please set TIMEZONE to match. Check the available #", "named like? 
TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}' # Links for the sidebar / navigation bar.", "USE_BUNDLES = False # Plugins you don't want to use. Be careful :-)", "themes, it might be displayed by some browsers as # the browser UI", "posts? SHOW_SOURCELINK = False # Copy the source files for your pages? #", "or not github_deploy should commit to the source branch automatically # before deploying.", "manual. COMMENT_SYSTEM_ID = \"\" # Create index.html for page folders? # WARNING: if", "done CONTENT_FOOTER_FORMATS = { DEFAULT_LANG: ( (), { \"email\": BLOG_EMAIL, \"author\": BLOG_AUTHOR, \"date\":", "} # What will translated input files be named like? TRANSLATIONS_PATTERN = '{path}.{lang}.{ext}'", "INDEX_TEASERS is True (translatable) INDEX_READ_MORE_LINK = '<p class=\"more\"><a href=\"{link}\">{read_more}…</a></p>' # 'Read more...' for", "files to be copied as-is into the output. # The format is a", "basis by adding # .. pretty_url: False # to the metadata. PRETTY_URLS =", "publicly, you can mark it as hidden. # The category will not be", "is {} (no config at all) # MARKDOWN_EXTENSION_CONFIGS = {} # Social buttons.", "for the sidebar. # (translatable) LICENSE = \"\" # I recommend using the", "(new_post -s) # Specify an iCal Recurrence Rule: https://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE = ''", "those are # done in the code, hope you don't mind ;-) #", "style: http://cldr.unicode.org/translation/date-time-1/date-time # You can also use 'full', 'long', 'medium', or 'short' #", "these, just set to [] REDIRECTIONS = [] # Presets of commands to", "compiler for `new_post` is the first entry in the POSTS tuple. # #", "['.rst', '.txt'], \"markdown\": ['.md', '.mdown', '.markdown'], \"textile\": ['.textile'], \"txt2tags\": ['.t2t'], \"bbcode\": ['.bb'], \"wiki\":", "parts of the indexes plugin. # Use with care. 
# DISABLE_INDEXES = False", "passed to markdown extensions (See https://python-markdown.github.io/reference/) # Default is {} (no config at", "where you wish to push to, using github_deploy. GITHUB_REMOTE_NAME = 'origin' # Whether", "default: 'cache' # CACHE_FOLDER = 'cache' # ############################################################################# # Image Gallery Options #", "CATEGORY_ALLOW_HIERARCHIES = False # If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written", "'Contents © {date} {author} {license}' RSS_COPYRIGHT_FORMATS = CONTENT_FOOTER_FORMATS # To use comments, you", "be used for directory indexes? # Defaults to index.html # Common other alternatives:", "to match. Check the available # list from Wikipedia: TIMEZONE = \"Europe/London\" #", "10 # Extra things you want in the pages HEAD tag. This will", "in a large box, with the previewimage as its background. 'featured_large': False, #", "by <a href=\"https://getnikola.com\" rel=\"nofollow\">Nikola</a> {license}' # Things that will be passed to CONTENT_FOOTER.format().", "0 = using DATE_FORMAT and TIMEZONE (without JS) # 1 = using LUXON_DATE_FORMAT", "{ DEFAULT_LANG: '' } # URLs to other posts/pages can take 3 forms:", "resources from # /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.", "False # Add the absolute paths to directories containing plugins to use them.", "# Do you want to add a Mathjax config file? # MATHJAX_CONFIG =", "of Post per Index Page # Defaults to 10 # INDEX_DISPLAY_POST_COUNT = 10", "= [] # The <hN> tags in HTML generated by certain compilers (reST/Markdown)", "feed\"), ), } # Alternative navigation links. 
Works the same way NAVIGATION_LINKS does,", "= \"\" # And you also need to add your COMMENT_SYSTEM_ID which #", "you want here, or even make it empty (which is # the default", "# LOCALES = {} # One or more folders containing files to be", "# <li><a class=\"addthis_button_google_plusone_share\"></a> # <li><a class=\"addthis_button_linkedin\"></a> # <li><a class=\"addthis_button_twitter\"></a> # </ul> # </div>", "None # Images will be scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE #", "might be displayed by some browsers as # the browser UI color (eg.", "will be added right # before </head> # (translatable) # EXTRA_HEAD_DATA = \"\"", "DISABLE_MAIN_ATOM_FEED = False # DISABLE_MAIN_RSS_FEED = False # Add the absolute paths to", "the others. # \"pandoc\": ['.rst', '.md', '.txt'], } # Preferred metadata format for" ]
[ "stop = str(input('Gostaria de procurar outra palavra?[S/N]: '))[0].upper() if stop == 'N': break", "= str(input('Informe a palavra que gostaria de procurar: ')) try: with open(f'acounts.txt', 'r')", "encontrado') stop = str(input('Gostaria de procurar outra palavra?[S/N]: '))[0].upper() if stop == 'N':", "palavra que gostaria de procurar: ')) try: with open(f'acounts.txt', 'r') as file: for", "as file: for lines in file.readlines(): if search in lines: print(lines) break else:", "break else: print('Não encontrado') except FileNotFoundError: print('Arquivo não encontrado') stop = str(input('Gostaria de", "a palavra que gostaria de procurar: ')) try: with open(f'acounts.txt', 'r') as file:", "except FileNotFoundError: print('Arquivo não encontrado') stop = str(input('Gostaria de procurar outra palavra?[S/N]: '))[0].upper()", "str(input('Informe a palavra que gostaria de procurar: ')) try: with open(f'acounts.txt', 'r') as", "print('Arquivo não encontrado') stop = str(input('Gostaria de procurar outra palavra?[S/N]: '))[0].upper() if stop", "if search in lines: print(lines) break else: print('Não encontrado') except FileNotFoundError: print('Arquivo não", "FileNotFoundError: print('Arquivo não encontrado') stop = str(input('Gostaria de procurar outra palavra?[S/N]: '))[0].upper() if", "in file.readlines(): if search in lines: print(lines) break else: print('Não encontrado') except FileNotFoundError:", "print('Não encontrado') except FileNotFoundError: print('Arquivo não encontrado') stop = str(input('Gostaria de procurar outra", "file: for lines in file.readlines(): if search in lines: print(lines) break else: print('Não", "gostaria de procurar: ')) try: with open(f'acounts.txt', 'r') as file: for lines in", "search = str(input('Informe a palavra que gostaria de procurar: ')) try: with open(f'acounts.txt',", "file.readlines(): if search in lines: print(lines) break else: print('Não encontrado') except FileNotFoundError: print('Arquivo", "search in lines: 
print(lines) break else: print('Não encontrado') except FileNotFoundError: print('Arquivo não encontrado')", "'r') as file: for lines in file.readlines(): if search in lines: print(lines) break", "try: with open(f'acounts.txt', 'r') as file: for lines in file.readlines(): if search in", "procurar: ')) try: with open(f'acounts.txt', 'r') as file: for lines in file.readlines(): if", "encontrado') except FileNotFoundError: print('Arquivo não encontrado') stop = str(input('Gostaria de procurar outra palavra?[S/N]:", "open(f'acounts.txt', 'r') as file: for lines in file.readlines(): if search in lines: print(lines)", "print(lines) break else: print('Não encontrado') except FileNotFoundError: print('Arquivo não encontrado') stop = str(input('Gostaria", "lines: print(lines) break else: print('Não encontrado') except FileNotFoundError: print('Arquivo não encontrado') stop =", "')) try: with open(f'acounts.txt', 'r') as file: for lines in file.readlines(): if search", "with open(f'acounts.txt', 'r') as file: for lines in file.readlines(): if search in lines:", "não encontrado') stop = str(input('Gostaria de procurar outra palavra?[S/N]: '))[0].upper() if stop ==", "True: search = str(input('Informe a palavra que gostaria de procurar: ')) try: with", "de procurar: ')) try: with open(f'acounts.txt', 'r') as file: for lines in file.readlines():", "for lines in file.readlines(): if search in lines: print(lines) break else: print('Não encontrado')", "in lines: print(lines) break else: print('Não encontrado') except FileNotFoundError: print('Arquivo não encontrado') stop", "else: print('Não encontrado') except FileNotFoundError: print('Arquivo não encontrado') stop = str(input('Gostaria de procurar", "while True: search = str(input('Informe a palavra que gostaria de procurar: ')) try:", "lines in file.readlines(): if search in lines: print(lines) break else: print('Não encontrado') except", "que gostaria de procurar: ')) try: with open(f'acounts.txt', 'r') as file: for lines" ]
[ "could be <= than 90, but that would mean a location in the", "zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) def", "== len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc) # check first few are", "4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None, ) # remove all gsp systems", "a location in the middle of the sea, # which is impossible for", "2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) def test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP locations\"\"\" local_path =", "len(example.x_osgb) > 0 assert pd.Timestamp(example.time[0].values) <= end_dt assert pd.Timestamp(example.time[0].values) >= start_dt def test_gsp_pv_data_source_get_batch():", "len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" start_dt =", "for GSPDataSource \"\"\" import os from datetime import datetime import pandas as pd", "(t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps] == t0_datetimes_utc[1]).all() # check all datetimes t0_datetimes_utc_all_gsps_overlap =", "northern_boundary_osgb=north_osgb_median ) assert len(gsp_power.columns) == len(gsp.gsp_power.columns) / 2 assert len(metadata) == len(gsp.metadata) /", "GSP init\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" _ = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4,", "sure it is not in lat/lon. 
# Note that OSGB could be <=", "sea, # which is impossible for GSP data assert locations_x[0] > 90 assert", "90 lat, lon = osgb_to_lat_lon(locations_x, locations_y) assert 0 < lat[0] < 90 #", "zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) batch_size", "= t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example(): \"\"\"Test GSP example\"\"\" local_path =", "2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) locations_x, locations_y = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert len(locations_x) ==", "GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" start_dt = datetime(2020, 4, 1) end_dt", "lon = osgb_to_lat_lon(locations_x, locations_y) assert 0 < lat[0] < 90 # this makes", "len(batch.y_osgb[1]) assert len(batch.x_osgb[2]) > 0 # assert T0_DT in batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test", "zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None, )", "drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0 ) assert len(gsp_power.columns) == 0 assert len(metadata) == 0", "= gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size], x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], ) assert len(batch.power_mw[0]) ==", "end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) locations_x, locations_y = 
gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert", "4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) def test_gsp_pv_data_source_get_locations(): \"\"\"Test", "locations_y = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert len(locations_x) == len(locations_y) # This makes sure it is", "be <= than 90, but that would mean a location in the middle", "len(example.id) == len(example.power_mw[0]) assert len(example.x_osgb) == len(example.y_osgb) assert len(example.x_osgb) > 0 assert pd.Timestamp(example.time[0].values)", "len(batch.id[0]) == len(batch.x_osgb[0]) assert len(batch.x_osgb[1]) == len(batch.y_osgb[1]) assert len(batch.x_osgb[2]) > 0 # assert", "= gsp.gsp_power.index[0:10] x_locations = gsp.metadata.location_x ( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, y_centers_osgb_all_gsps, ) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert", "in the middle of the sea, # which is impossible for GSP data", "makes sure it is not in lat/lon. 
# Note that OSGB could be", "which is impossible for GSP data assert locations_x[0] > 90 assert locations_y[0] >", "that OSGB could be <= than 90, but that would mean a location", "impossible for GSP data assert locations_x[0] > 90 assert locations_y[0] > 90 lat,", "4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) locations_x, locations_y =", "assert len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc) # check first few are the same", "north of a boundary works\"\"\" gsp = GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020,", "start_dt def test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP batch\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp =", "t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, y_centers_osgb_all_gsps, ) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) ==", "= len(gsp.metadata) t0_datetimes_utc = gsp.gsp_power.index[0:10] x_locations = gsp.metadata.location_x ( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, y_centers_osgb_all_gsps, )", "batch = gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size], x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], ) assert len(batch.power_mw[0])", "systems gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0 ) assert len(gsp_power.columns) == 0", "datetime import pandas as pd import nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source import ( GSPDataSource, drop_gsp_north_of_boundary,", "> 90 lat, lon = osgb_to_lat_lon(locations_x, locations_y) assert 0 < lat[0] < 90", "1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, 
image_size_pixels=64, meters_per_pixel=2000, ) N_gsps = len(gsp.metadata) t0_datetimes_utc", "this makes sure it is in lat/lon assert -90 < lon[0] < 90", "= gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], ) assert len(example.id) == len(example.power_mw[0]) assert len(example.x_osgb) ==", "len(example.x_osgb) == len(example.y_osgb) assert len(example.x_osgb) > 0 assert pd.Timestamp(example.time[0].values) <= end_dt assert pd.Timestamp(example.time[0].values)", "it is in lat/lon assert -90 < lon[0] < 90 # this makes", "history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) def test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP locations\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__)", ") batch_size = 10 x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch = gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size :", "Note that OSGB could be <= than 90, but that would mean a", "\"\"\"Test GSP batch\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020,", "meters_per_pixel=2000, northern_boundary_osgb=None, ) # remove all gsp systems gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power,", "= drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0 ) assert len(gsp_power.columns) == 0 assert len(metadata) ==", "GSP batch\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4,", "def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that dropping GSP north of a boundary works\"\"\" gsp =", "image_size_pixels=64, meters_per_pixel=2000, ) def test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP 
locations\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\"", "end_dt = datetime(2020, 4, 1) gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020,", "zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) N_gsps", "check second set of datetimes assert (x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps] == x_locations.values).all()", "a boundary works\"\"\" gsp = GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2),", "image_size_pixels=64, meters_per_pixel=2000, ) N_gsps = len(gsp.metadata) t0_datetimes_utc = gsp.gsp_power.index[0:10] x_locations = gsp.metadata.location_x (", "( GSPDataSource, drop_gsp_north_of_boundary, ) from nowcasting_dataset.geospatial import osgb_to_lat_lon def test_gsp_pv_data_source_init(): \"\"\"Test GSP init\"\"\"", "GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, )", "few are the same datetime assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all()", "assert len(gsp_power.columns) == 0 assert len(metadata) == 0 # remove half the systems", "4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example =", "nowcasting_dataset.geospatial import osgb_to_lat_lon def test_gsp_pv_data_source_init(): \"\"\"Test GSP init\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\"", "of the sea, # which 
is impossible for GSP data assert locations_x[0] >", "assert (t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps] == t0_datetimes_utc[1]).all() # check all datetimes t0_datetimes_utc_all_gsps_overlap", "* len(t0_datetimes_utc) # check first few are the same datetime assert (x_centers_osgb_all_gsps[0:N_gsps] ==", "len(example.power_mw[0]) assert len(example.x_osgb) == len(example.y_osgb) assert len(example.x_osgb) > 0 assert pd.Timestamp(example.time[0].values) <= end_dt", "len(batch.power_mw[0]) == 4 assert len(batch.id[0]) == len(batch.x_osgb[0]) assert len(batch.x_osgb[1]) == len(batch.y_osgb[1]) assert len(batch.x_osgb[2])", "history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None, ) # remove all gsp systems gsp_power, metadata", "test_gsp_pv_data_source_init(): \"\"\"Test GSP init\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" _ = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\",", "90 assert locations_y[0] > 90 lat, lon = osgb_to_lat_lon(locations_x, locations_y) assert 0 <", "assert locations_y[0] > 90 lat, lon = osgb_to_lat_lon(locations_x, locations_y) assert 0 < lat[0]", "gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert len(locations_x) == len(locations_y) # This makes sure it is not in", "== len(example.y_osgb) assert len(example.x_osgb) > 0 assert pd.Timestamp(example.time[0].values) <= end_dt assert pd.Timestamp(example.time[0].values) >=", "gsp.metadata, northern_boundary_osgb=north_osgb_median ) assert len(gsp_power.columns) == len(gsp.gsp_power.columns) / 2 assert len(metadata) == len(gsp.metadata)", "len(example.y_osgb) assert len(example.x_osgb) > 0 assert pd.Timestamp(example.time[0].values) <= end_dt assert pd.Timestamp(example.time[0].values) >= start_dt", "# assert T0_DT in batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that dropping GSP north of", 
"zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) locations_x,", "len(t0_datetimes_utc) # check first few are the same datetime assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all()", "remove half the systems north_osgb_median = int(gsp.metadata.location_y.median()) gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata,", "0 assert len(metadata) == 0 # remove half the systems north_osgb_median = int(gsp.metadata.location_y.median())", "assert len(batch.x_osgb[1]) == len(batch.y_osgb[1]) assert len(batch.x_osgb[2]) > 0 # assert T0_DT in batch[3].keys()", "lat/lon assert -90 < lon[0] < 90 # this makes sure it is", "assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc) # check first", ") # remove all gsp systems gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0", "0 < lat[0] < 90 # this makes sure it is in lat/lon", "end_dt assert pd.Timestamp(example.time[0].values) >= start_dt def test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP batch\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__)", "that dropping GSP north of a boundary works\"\"\" gsp = GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020,", "# which is impossible for GSP data assert locations_x[0] > 90 assert locations_y[0]", "not in lat/lon. 
# Note that OSGB could be <= than 90, but", "import pandas as pd import nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source import ( GSPDataSource, drop_gsp_north_of_boundary, )", "def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource(", "= 10 x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch = gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 *", "x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all() # check second set of datetimes assert (x_centers_osgb_all_gsps[N_gsps", "== len(x_locations) * len(t0_datetimes_utc) # check first few are the same datetime assert", "-90 < lon[0] < 90 # this makes sure it is in lat/lon", "len(batch.x_osgb[1]) == len(batch.y_osgb[1]) assert len(batch.x_osgb[2]) > 0 # assert T0_DT in batch[3].keys() def", "this makes sure it is in lat/lon def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP example\"\"\" local_path", "GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4,", "2 * N_gsps] == t0_datetimes_utc[1]).all() # check all datetimes t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert", "gsp.gsp_power.index[0:10] x_locations = gsp.metadata.location_x ( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, y_centers_osgb_all_gsps, ) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps)", "lat, lon = osgb_to_lat_lon(locations_x, locations_y) assert 0 < lat[0] < 90 # this", "1) end_dt = datetime(2020, 4, 1) gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1),", ": 2 * batch_size], 
x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], ) assert len(batch.power_mw[0]) == 4 assert len(batch.id[0])", "end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None, ) # remove all gsp", "int(gsp.metadata.location_y.median()) gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median ) assert len(gsp_power.columns) == len(gsp.gsp_power.columns)", "len(gsp_power.columns) == 0 assert len(metadata) == 0 # remove half the systems north_osgb_median", "pd import nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source import ( GSPDataSource, drop_gsp_north_of_boundary, ) from nowcasting_dataset.geospatial import", "osgb_to_lat_lon(locations_x, locations_y) assert 0 < lat[0] < 90 # this makes sure it", "T0_DT in batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that dropping GSP north of a boundary", "N_gsps = len(gsp.metadata) t0_datetimes_utc = gsp.gsp_power.index[0:10] x_locations = gsp.metadata.location_x ( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, y_centers_osgb_all_gsps,", "osgb_to_lat_lon def test_gsp_pv_data_source_init(): \"\"\"Test GSP init\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" _ =", "# Note that OSGB could be <= than 90, but that would mean", "2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example = gsp.get_example(", "os from datetime import datetime import pandas as pd import nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source", "<= than 90, but that would mean a location in the middle of", "len(locations_x) == len(locations_y) # This makes sure it is not in lat/lon. 
#", "assert len(example.x_osgb) > 0 assert pd.Timestamp(example.time[0].values) <= end_dt assert pd.Timestamp(example.time[0].values) >= start_dt def", "set of datetimes assert (x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps", ") assert len(gsp_power.columns) == len(gsp.gsp_power.columns) / 2 assert len(metadata) == len(gsp.metadata) / 2", "in lat/lon def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp", "\"/..\" start_dt = datetime(2020, 4, 1) end_dt = datetime(2020, 4, 1) gsp =", "import nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source import ( GSPDataSource, drop_gsp_north_of_boundary, ) from nowcasting_dataset.geospatial import osgb_to_lat_lon", "len(locations_y) # This makes sure it is not in lat/lon. # Note that", "_ = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64,", "import osgb_to_lat_lon def test_gsp_pv_data_source_init(): \"\"\"Test GSP init\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" _", "makes sure it is in lat/lon def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP example\"\"\" local_path =", "of a boundary works\"\"\" gsp = GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4,", "= os.path.dirname(nowcasting_dataset.__file__) + \"/..\" _ = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4,", "\"\"\" Tests for GSPDataSource \"\"\" import os from datetime import datetime import pandas", "local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" start_dt = datetime(2020, 4, 1) end_dt = 
datetime(2020,", "first few are the same datetime assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps] ==", "os.path.dirname(nowcasting_dataset.__file__) + \"/..\" _ = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2),", "4, 1) end_dt = datetime(2020, 4, 1) gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4,", "assert (x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps : 2 *", "# remove all gsp systems gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0 )", "x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps] == t0_datetimes_utc[1]).all() # check all datetimes", "half the systems north_osgb_median = int(gsp.metadata.location_y.median()) gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median", "forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) batch_size = 10 x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch =", "end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example", "== 4 assert len(batch.id[0]) == len(batch.x_osgb[0]) assert len(batch.x_osgb[1]) == len(batch.y_osgb[1]) assert len(batch.x_osgb[2]) >", "0 # assert T0_DT in batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that dropping GSP north", "meters_per_pixel=2000, ) N_gsps = len(gsp.metadata) t0_datetimes_utc = gsp.gsp_power.index[0:10] x_locations = gsp.metadata.location_x ( t0_datetimes_utc_all_gsps,", "example\"\"\" local_path = 
os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1),", "# remove half the systems north_osgb_median = int(gsp.metadata.location_y.median()) gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power,", "nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source import ( GSPDataSource, drop_gsp_north_of_boundary, ) from nowcasting_dataset.geospatial import osgb_to_lat_lon def", "for GSP data assert locations_x[0] > 90 assert locations_y[0] > 90 lat, lon", "== t0_datetimes_utc[1]).all() # check all datetimes t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps)", "forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None, ) # remove all gsp systems gsp_power, metadata =", ") assert len(example.id) == len(example.power_mw[0]) assert len(example.x_osgb) == len(example.y_osgb) assert len(example.x_osgb) > 0", "# check all datetimes t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example():", "y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch = gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size], x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size],", "gsp.metadata, northern_boundary_osgb=0 ) assert len(gsp_power.columns) == 0 assert len(metadata) == 0 # remove", "mean a location in the middle of the sea, # which is impossible", "lat[0] < 90 # this makes sure it is in lat/lon assert -90", "start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) 
def test_gsp_pv_data_source_get_locations():", "are the same datetime assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all() #", "os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2),", "# check second set of datetimes assert (x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps] ==", "90, but that would mean a location in the middle of the sea,", "= os.path.dirname(nowcasting_dataset.__file__) + \"/..\" start_dt = datetime(2020, 4, 1) end_dt = datetime(2020, 4,", "assert len(batch.id[0]) == len(batch.x_osgb[0]) assert len(batch.x_osgb[1]) == len(batch.y_osgb[1]) assert len(batch.x_osgb[2]) > 0 #", "end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) def test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP locations\"\"\"", "\"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60,", "== x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps] == t0_datetimes_utc[1]).all() # check all", "sure it is in lat/lon def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__)", "= osgb_to_lat_lon(locations_x, locations_y) assert 0 < lat[0] < 90 # this makes sure", "= datetime(2020, 4, 1) gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4,", "== len(example.power_mw[0]) assert len(example.x_osgb) == len(example.y_osgb) assert len(example.x_osgb) > 0 assert pd.Timestamp(example.time[0].values) <=", "4, 1), end_datetime=datetime(2020, 4, 2), 
history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) batch_size = 10", "assert len(example.x_osgb) == len(example.y_osgb) assert len(example.x_osgb) > 0 assert pd.Timestamp(example.time[0].values) <= end_dt assert", "lat/lon. # Note that OSGB could be <= than 90, but that would", "start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) x_locations, y_locations", "the middle of the sea, # which is impossible for GSP data assert", "1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) def test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP", "data assert locations_x[0] > 90 assert locations_y[0] > 90 lat, lon = osgb_to_lat_lon(locations_x,", "locations_y[0] > 90 lat, lon = osgb_to_lat_lon(locations_x, locations_y) assert 0 < lat[0] <", "x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch = gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size], x_centers_osgb=x_locations[0:batch_size],", "gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64,", "start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) batch_size =", ") assert len(batch.power_mw[0]) == 4 assert len(batch.id[0]) == len(batch.x_osgb[0]) assert len(batch.x_osgb[1]) == len(batch.y_osgb[1])", ") from nowcasting_dataset.geospatial import osgb_to_lat_lon def test_gsp_pv_data_source_init(): \"\"\"Test GSP init\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__)", "== len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == 
len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc) #", "1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) batch_size = 10 x_locations,", "1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) locations_x, locations_y = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10])", "len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc) # check", "test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP locations\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\",", "end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) N_gsps = len(gsp.metadata) t0_datetimes_utc =", "assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(x_locations) *", "<= end_dt assert pd.Timestamp(example.time[0].values) >= start_dt def test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP batch\"\"\" local_path =", "location in the middle of the sea, # which is impossible for GSP", "= datetime(2020, 4, 1) end_dt = datetime(2020, 4, 1) gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\",", "as pd import nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source import ( GSPDataSource, drop_gsp_north_of_boundary, ) from nowcasting_dataset.geospatial", "len(gsp.metadata) t0_datetimes_utc = gsp.gsp_power.index[0:10] x_locations = gsp.metadata.location_x ( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, y_centers_osgb_all_gsps, ) =", 
"from nowcasting_dataset.data_sources.gsp.gsp_data_source import ( GSPDataSource, drop_gsp_north_of_boundary, ) from nowcasting_dataset.geospatial import osgb_to_lat_lon def test_gsp_pv_data_source_init():", "t0_datetimes_utc[1]).all() # check all datetimes t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps) def", "example = gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], ) assert len(example.id) == len(example.power_mw[0]) assert len(example.x_osgb)", "import ( GSPDataSource, drop_gsp_north_of_boundary, ) from nowcasting_dataset.geospatial import osgb_to_lat_lon def test_gsp_pv_data_source_init(): \"\"\"Test GSP", "from datetime import datetime import pandas as pd import nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source import", "(t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all() # check second set of datetimes assert (x_centers_osgb_all_gsps[N_gsps : 2", "\"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" start_dt = datetime(2020, 4, 1)", "meters_per_pixel=2000, ) def test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP locations\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp", "assert -90 < lon[0] < 90 # this makes sure it is in", "systems north_osgb_median = int(gsp.metadata.location_y.median()) gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median ) assert", "4, 1) gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30,", "2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) N_gsps = len(gsp.metadata) t0_datetimes_utc = gsp.gsp_power.index[0:10] 
x_locations", "nowcasting_dataset.data_sources.gsp.gsp_data_source import ( GSPDataSource, drop_gsp_north_of_boundary, ) from nowcasting_dataset.geospatial import osgb_to_lat_lon def test_gsp_pv_data_source_init(): \"\"\"Test", "* N_gsps] == t0_datetimes_utc[1]).all() # check all datetimes t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap)", "= gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example = gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], ) assert len(example.id) == len(example.power_mw[0])", "This makes sure it is not in lat/lon. # Note that OSGB could", "assert pd.Timestamp(example.time[0].values) <= end_dt assert pd.Timestamp(example.time[0].values) >= start_dt def test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP batch\"\"\"", "\"\"\"Test GSP locations\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020,", "= GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000,", "gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], ) assert len(example.id) == len(example.power_mw[0]) assert len(example.x_osgb) == len(example.y_osgb)", "is in lat/lon assert -90 < lon[0] < 90 # this makes sure", "len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc) # check first few are the", "makes sure it is in lat/lon assert -90 < lon[0] < 90 #", "GSP locations\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", 
start_datetime=datetime(2020, 4,", "# This makes sure it is not in lat/lon. # Note that OSGB", "t0_datetimes_utc = gsp.gsp_power.index[0:10] x_locations = gsp.metadata.location_x ( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, y_centers_osgb_all_gsps, ) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc)", "+ \"/..\" start_dt = datetime(2020, 4, 1) end_dt = datetime(2020, 4, 1) gsp", "assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all() # check second set of", "start_dt = datetime(2020, 4, 1) end_dt = datetime(2020, 4, 1) gsp = GSPDataSource(", "(x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all() # check second set of datetimes", "0 assert pd.Timestamp(example.time[0].values) <= end_dt assert pd.Timestamp(example.time[0].values) >= start_dt def test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP", "assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all() # check second set of datetimes assert (x_centers_osgb_all_gsps[N_gsps :", "it is not in lat/lon. # Note that OSGB could be <= than", "# this makes sure it is in lat/lon def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP example\"\"\"", "end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) batch_size = 10 x_locations, y_locations", "the sea, # which is impossible for GSP data assert locations_x[0] > 90", "len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\"", "len(metadata) == 0 # remove half the systems north_osgb_median = int(gsp.metadata.location_y.median()) gsp_power, metadata", "is not in lat/lon. 
# Note that OSGB could be <= than 90,", "t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], ) assert len(example.id) == len(example.power_mw[0]) assert len(example.x_osgb) == len(example.y_osgb) assert", "1) gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60,", "4 assert len(batch.id[0]) == len(batch.x_osgb[0]) assert len(batch.x_osgb[1]) == len(batch.y_osgb[1]) assert len(batch.x_osgb[2]) > 0", "gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch = gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size], x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], ) assert", "2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None, ) # remove all gsp systems gsp_power,", "assert len(batch.power_mw[0]) == 4 assert len(batch.id[0]) == len(batch.x_osgb[0]) assert len(batch.x_osgb[1]) == len(batch.y_osgb[1]) assert", "t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size], x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], ) assert len(batch.power_mw[0]) == 4 assert", "* batch_size], x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], ) assert len(batch.power_mw[0]) == 4 assert len(batch.id[0]) == len(batch.x_osgb[0])", "second set of datetimes assert (x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps] == x_locations.values).all() assert", "local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" _ = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020,", "meters_per_pixel=2000, ) locations_x, locations_y = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert len(locations_x) == 
len(locations_y) # This makes", "same datetime assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all() # check second", "assert pd.Timestamp(example.time[0].values) >= start_dt def test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP batch\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) +", ">= start_dt def test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP batch\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp", "< lat[0] < 90 # this makes sure it is in lat/lon assert", "GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None,", "1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None, ) # remove all", "zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) x_locations,", "gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median ) assert len(gsp_power.columns) == len(gsp.gsp_power.columns) / 2 assert len(metadata) ==", "( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, y_centers_osgb_all_gsps, ) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps)", "local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020,", "OSGB could be <= than 90, but that would mean a location in", "assert 0 < lat[0] < 90 # this makes sure it 
is in", "t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example(): \"\"\"Test GSP example\"\"\" local_path", "# this makes sure it is in lat/lon assert -90 < lon[0] <", "t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__)", "works\"\"\" gsp = GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60,", "== 0 # remove half the systems north_osgb_median = int(gsp.metadata.location_y.median()) gsp_power, metadata =", "would mean a location in the middle of the sea, # which is", "forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) N_gsps = len(gsp.metadata) t0_datetimes_utc = gsp.gsp_power.index[0:10] x_locations = gsp.metadata.location_x", "test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that dropping GSP north of a boundary works\"\"\" gsp = GSPDataSource(", "example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" start_dt = datetime(2020, 4, 1) end_dt =", "history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) N_gsps = len(gsp.metadata) t0_datetimes_utc = gsp.gsp_power.index[0:10] x_locations =", "\"\"\"Test GSP init\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" _ = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020,", "= gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert len(locations_x) == len(locations_y) # This makes sure it is not", "gsp.metadata.location_x ( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, 
y_centers_osgb_all_gsps, ) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps) assert", "2 * batch_size], x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], ) assert len(batch.power_mw[0]) == 4 assert len(batch.id[0]) ==", "N_gsps] == t0_datetimes_utc[1]).all() # check all datetimes t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap) ==", "lon[0] < 90 # this makes sure it is in lat/lon def test_gsp_pv_data_source_get_all_locations():", ") = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps)", "1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10])", "gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median ) assert len(gsp_power.columns) == len(gsp.gsp_power.columns) /", "== len(locations_y) # This makes sure it is not in lat/lon. 
# Note", "+ \"/..\" _ = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30,", "forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) locations_x, locations_y = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert len(locations_x) == len(locations_y) #", "N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps] == t0_datetimes_utc[1]).all() # check", "batch_size], x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], ) assert len(batch.power_mw[0]) == 4 assert len(batch.id[0]) == len(batch.x_osgb[0]) assert", "that would mean a location in the middle of the sea, # which", "sure it is in lat/lon assert -90 < lon[0] < 90 # this", "in lat/lon assert -90 < lon[0] < 90 # this makes sure it", "than 90, but that would mean a location in the middle of the", "GSP north of a boundary works\"\"\" gsp = GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1),", "all datetimes t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example(): \"\"\"Test GSP", "x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example = gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], ) assert len(example.id)", "datetimes t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example(): \"\"\"Test GSP example\"\"\"", "metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median ) assert len(gsp_power.columns) == 
len(gsp.gsp_power.columns) / 2", "gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size], x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], ) assert len(batch.power_mw[0]) == 4", "batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that dropping GSP north of a boundary works\"\"\" gsp", "history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) batch_size = 10 x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch", "< 90 # this makes sure it is in lat/lon assert -90 <", "x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], ) assert len(batch.power_mw[0]) == 4 assert len(batch.id[0]) == len(batch.x_osgb[0]) assert len(batch.x_osgb[1])", "y_centers_osgb=y_locations[0:batch_size], ) assert len(batch.power_mw[0]) == 4 assert len(batch.id[0]) == len(batch.x_osgb[0]) assert len(batch.x_osgb[1]) ==", "\"/..\" _ = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60,", "init\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" _ = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1),", "(x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps]", "locations\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1),", "meters_per_pixel=2000, ) batch_size = 10 x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch = gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size", "= GSPDataSource( 
zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000,", "pd.Timestamp(example.time[0].values) <= end_dt assert pd.Timestamp(example.time[0].values) >= start_dt def test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP batch\"\"\" local_path", "batch\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1),", "> 0 # assert T0_DT in batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that dropping GSP", "def test_gsp_pv_data_source_init(): \"\"\"Test GSP init\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" _ = GSPDataSource(", "\"\"\"Test that dropping GSP north of a boundary works\"\"\" gsp = GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\",", ") def test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP locations\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp =", "locations_x[0] > 90 assert locations_y[0] > 90 lat, lon = osgb_to_lat_lon(locations_x, locations_y) assert", "datetime(2020, 4, 1) end_dt = datetime(2020, 4, 1) gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020,", "4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) batch_size = 10 x_locations, y_locations =", "len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc)", "+ \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30,", "datetime(2020, 4, 1) gsp = 
GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2),", "assert len(batch.x_osgb[2]) > 0 # assert T0_DT in batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that", "90 # this makes sure it is in lat/lon assert -90 < lon[0]", "4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) x_locations, y_locations =", "middle of the sea, # which is impossible for GSP data assert locations_x[0]", "== len(batch.x_osgb[0]) assert len(batch.x_osgb[1]) == len(batch.y_osgb[1]) assert len(batch.x_osgb[2]) > 0 # assert T0_DT", "> 90 assert locations_y[0] > 90 lat, lon = osgb_to_lat_lon(locations_x, locations_y) assert 0", "len(x_locations) * len(t0_datetimes_utc) # check first few are the same datetime assert (x_centers_osgb_all_gsps[0:N_gsps]", "4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None, ) # remove", "assert len(example.id) == len(example.power_mw[0]) assert len(example.x_osgb) == len(example.y_osgb) assert len(example.x_osgb) > 0 assert", "= int(gsp.metadata.location_y.median()) gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median ) assert len(gsp_power.columns) ==", "the same datetime assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all() # check", "4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) def test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP locations\"\"\" local_path", "assert locations_x[0] > 90 assert locations_y[0] > 90 lat, lon = osgb_to_lat_lon(locations_x, locations_y)", "northern_boundary_osgb=None, ) # remove all gsp systems gsp_power, metadata = 
drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata,", "drop_gsp_north_of_boundary, ) from nowcasting_dataset.geospatial import osgb_to_lat_lon def test_gsp_pv_data_source_init(): \"\"\"Test GSP init\"\"\" local_path =", "image_size_pixels=64, meters_per_pixel=2000, ) locations_x, locations_y = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert len(locations_x) == len(locations_y) # This", "os.path.dirname(nowcasting_dataset.__file__) + \"/..\" start_dt = datetime(2020, 4, 1) end_dt = datetime(2020, 4, 1)", "start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None, ) #", "the systems north_osgb_median = int(gsp.metadata.location_y.median()) gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median )", "import os from datetime import datetime import pandas as pd import nowcasting_dataset from", "def test_gsp_pv_data_source_get_example(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" start_dt = datetime(2020,", "lat/lon def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp =", "image_size_pixels=64, meters_per_pixel=2000, ) batch_size = 10 x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch = gsp.get_batch(", "10 x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch = gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size],", "gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0 ) assert len(gsp_power.columns) == 0 assert", "north_osgb_median = int(gsp.metadata.location_y.median()) gsp_power, metadata = drop_gsp_north_of_boundary( 
gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median ) assert len(gsp_power.columns)", "gsp systems gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0 ) assert len(gsp_power.columns) ==", "gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example = gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], ) assert len(example.id) == len(example.power_mw[0]) assert", "t0_datetimes_utc[0]).all() # check second set of datetimes assert (x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps]", ": 2 * N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps] ==", "4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) N_gsps = len(gsp.metadata) t0_datetimes_utc = gsp.gsp_power.index[0:10]", "in batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that dropping GSP north of a boundary works\"\"\"", "meters_per_pixel=2000, ) x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example = gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], )", "== 0 assert len(metadata) == 0 # remove half the systems north_osgb_median =", "start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) locations_x, locations_y", "y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example = gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], ) assert len(example.id) ==", "import datetime import pandas as pd import nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source import ( GSPDataSource,", ") locations_x, locations_y = 
gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert len(locations_x) == len(locations_y) # This makes sure", "len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc) # check first few", "# check first few are the same datetime assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all() assert", "== t0_datetimes_utc[0]).all() # check second set of datetimes assert (x_centers_osgb_all_gsps[N_gsps : 2 *", "locations_y) assert 0 < lat[0] < 90 # this makes sure it is", "batch_size = 10 x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch = gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2", "all gsp systems gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0 ) assert len(gsp_power.columns)", "of datetimes assert (x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps :", "metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0 ) assert len(gsp_power.columns) == 0 assert len(metadata)", "locations_x, locations_y = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert len(locations_x) == len(locations_y) # This makes sure it", "test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP batch\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\",", "test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\",", "history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) locations_x, locations_y = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert 
len(locations_x) == len(locations_y)", "len(batch.x_osgb[2]) > 0 # assert T0_DT in batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that dropping", "= gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) ==", "gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0 ) assert len(gsp_power.columns) == 0 assert len(metadata) == 0 #", "== len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" start_dt", "assert T0_DT in batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder): \"\"\"Test that dropping GSP north of a", "start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) N_gsps =", "* N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps] == t0_datetimes_utc[1]).all() #", "is in lat/lon def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\"", "check first few are the same datetime assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps]", "from nowcasting_dataset.geospatial import osgb_to_lat_lon def test_gsp_pv_data_source_init(): \"\"\"Test GSP init\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) +", "4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) N_gsps = len(gsp.metadata)", "assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example(): \"\"\"Test GSP example\"\"\" local_path = 
os.path.dirname(nowcasting_dataset.__file__) +", "datetime import datetime import pandas as pd import nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source import (", ") N_gsps = len(gsp.metadata) t0_datetimes_utc = gsp.gsp_power.index[0:10] x_locations = gsp.metadata.location_x ( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps,", "test_gsp_pv_data_source_get_example(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" start_dt = datetime(2020, 4,", "assert len(metadata) == 0 # remove half the systems north_osgb_median = int(gsp.metadata.location_y.median()) gsp_power,", "datetime assert (x_centers_osgb_all_gsps[0:N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all() # check second set", "image_size_pixels=64, meters_per_pixel=2000, ) x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example = gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0],", "= drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median ) assert len(gsp_power.columns) == len(gsp.gsp_power.columns) / 2 assert", "2 * N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps : 2 * N_gsps] == t0_datetimes_utc[1]).all()", "def test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP locations\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource(", "len(batch.x_osgb[0]) assert len(batch.x_osgb[1]) == len(batch.y_osgb[1]) assert len(batch.x_osgb[2]) > 0 # assert T0_DT in", "0 # remove half the systems north_osgb_median = int(gsp.metadata.location_y.median()) gsp_power, metadata = drop_gsp_north_of_boundary(", "GSPDataSource, drop_gsp_north_of_boundary, ) from nowcasting_dataset.geospatial import osgb_to_lat_lon def test_gsp_pv_data_source_init(): \"\"\"Test GSP init\"\"\" 
local_path", "forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) def test_gsp_pv_data_source_get_locations(): \"\"\"Test GSP locations\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) +", ") assert len(gsp_power.columns) == 0 assert len(metadata) == 0 # remove half the", "GSPDataSource \"\"\" import os from datetime import datetime import pandas as pd import", "= os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4,", "len(t0_datetimes_utc_all_gsps) == len(x_locations) * len(t0_datetimes_utc) # check first few are the same datetime", "== len(batch.y_osgb[1]) assert len(batch.x_osgb[2]) > 0 # assert T0_DT in batch[3].keys() def test_drop_gsp_north_of_boundary(test_data_folder):", "assert len(locations_x) == len(locations_y) # This makes sure it is not in lat/lon.", "90 # this makes sure it is in lat/lon def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP", "def test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP batch\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource(", "it is in lat/lon def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) +", "drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=north_osgb_median ) assert len(gsp_power.columns) == len(gsp.gsp_power.columns) / 2 assert len(metadata)", "< lon[0] < 90 # this makes sure it is in lat/lon def", "\"\"\" import os from datetime import datetime import pandas as pd import nowcasting_dataset", "in lat/lon. 
# Note that OSGB could be <= than 90, but that", ") x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example = gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], ) assert", ": 2 * N_gsps] == t0_datetimes_utc[1]).all() # check all datetimes t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc)", "pandas as pd import nowcasting_dataset from nowcasting_dataset.data_sources.gsp.gsp_data_source import ( GSPDataSource, drop_gsp_north_of_boundary, ) from", "boundary works\"\"\" gsp = GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30,", "y_centers_osgb_all_gsps, ) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps) assert", "remove all gsp systems gsp_power, metadata = drop_gsp_north_of_boundary( gsp.gsp_power, gsp.metadata, northern_boundary_osgb=0 ) assert", "northern_boundary_osgb=0 ) assert len(gsp_power.columns) == 0 assert len(metadata) == 0 # remove half", "datetimes assert (x_centers_osgb_all_gsps[N_gsps : 2 * N_gsps] == x_locations.values).all() assert (t0_datetimes_utc_all_gsps[N_gsps : 2", "= gsp.metadata.location_x ( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, y_centers_osgb_all_gsps, ) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps)", "\"\"\"Test GSP example\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\" gsp = GSPDataSource( zarr_path=f\"{local_path}/tests/data/gsp/test.zarr\", start_datetime=datetime(2020,", "4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) locations_x, locations_y = 
gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) assert len(locations_x)", "x_center_osgb=x_locations[0], y_center_osgb=y_locations[0], ) assert len(example.id) == len(example.power_mw[0]) assert len(example.x_osgb) == len(example.y_osgb) assert len(example.x_osgb)", "image_size_pixels=64, meters_per_pixel=2000, northern_boundary_osgb=None, ) # remove all gsp systems gsp_power, metadata = drop_gsp_north_of_boundary(", "is impossible for GSP data assert locations_x[0] > 90 assert locations_y[0] > 90", "== x_locations.values).all() assert (t0_datetimes_utc_all_gsps[0:N_gsps] == t0_datetimes_utc[0]).all() # check second set of datetimes assert", "forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example = gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0], x_center_osgb=x_locations[0],", "x_centers_osgb_all_gsps, y_centers_osgb_all_gsps, ) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps)", "history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:10]) example = gsp.get_example( t0_datetime_utc=gsp.gsp_power.index[0],", "gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps) == len(x_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(y_centers_osgb_all_gsps) assert len(t0_datetimes_utc_all_gsps) == len(x_locations)", "Tests for GSPDataSource \"\"\" import os from datetime import datetime import pandas as", "x_locations = gsp.metadata.location_x ( t0_datetimes_utc_all_gsps, x_centers_osgb_all_gsps, y_centers_osgb_all_gsps, ) = gsp.get_all_locations(t0_datetimes_utc=t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps) ==", 
"y_center_osgb=y_locations[0], ) assert len(example.id) == len(example.power_mw[0]) assert len(example.x_osgb) == len(example.y_osgb) assert len(example.x_osgb) >", "= gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size]) batch = gsp.get_batch( t0_datetimes_utc=gsp.gsp_power.index[batch_size : 2 * batch_size], x_centers_osgb=x_locations[0:batch_size], y_centers_osgb=y_locations[0:batch_size], )", "2), history_minutes=30, forecast_minutes=60, image_size_pixels=64, meters_per_pixel=2000, ) batch_size = 10 x_locations, y_locations = gsp.get_locations(t0_datetimes_utc=gsp.gsp_power.index[0:batch_size])", "dropping GSP north of a boundary works\"\"\" gsp = GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4,", "GSP data assert locations_x[0] > 90 assert locations_y[0] > 90 lat, lon =", "check all datetimes t0_datetimes_utc_all_gsps_overlap = t0_datetimes_utc_all_gsps.union(t0_datetimes_utc) assert len(t0_datetimes_utc_all_gsps_overlap) == len(t0_datetimes_utc_all_gsps) def test_gsp_pv_data_source_get_example(): \"\"\"Test", "pd.Timestamp(example.time[0].values) >= start_dt def test_gsp_pv_data_source_get_batch(): \"\"\"Test GSP batch\"\"\" local_path = os.path.dirname(nowcasting_dataset.__file__) + \"/..\"", "but that would mean a location in the middle of the sea, #", "> 0 assert pd.Timestamp(example.time[0].values) <= end_dt assert pd.Timestamp(example.time[0].values) >= start_dt def test_gsp_pv_data_source_get_batch(): \"\"\"Test", "gsp = GSPDataSource( zarr_path=f\"{test_data_folder}/gsp/test.zarr\", start_datetime=datetime(2020, 4, 1), end_datetime=datetime(2020, 4, 2), history_minutes=30, forecast_minutes=60, image_size_pixels=64,", "< 90 # this makes sure it is in lat/lon def test_gsp_pv_data_source_get_all_locations(): \"\"\"Test" ]
[ "create_object_leak_tracker(start=False): global _object_leak_tracker from performance.object_leak_tracker import ObjectLeakTracker if _object_leak_tracker is None: _object_leak_tracker =", "get_eco_footprint_value(*_, **__): return 0 @staticmethod def get_rent(*_, **__): return 0 @staticmethod def get_lot_description_id(*_,", "= _zone.get_world_id get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker = _zone.get_hide_from_lot_picker with sims4.reload.protected(globals()):", "if travel_group is not None: if require_active_household and not travel_group.is_active_sim_in_travel_group: return else: return", "current_zone().social_group_cluster_service def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client)", "hasattr(zone, 'object_routing_service'): return zone.object_routing_service def get_landlord_service(): return getattr(game_services.service_manager, 'landlord_service', None) def get_roommate_service(): return", "_zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None): if household_id: return get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id)", "getattr(game_services.service_manager, 'holiday_service', None) def 
global_policy_service(): return getattr(game_services.service_manager, 'global_policy_service', None) def narrative_service(): return getattr(game_services.service_manager,", "get_sickness_service(): return game_services.service_manager.sickness_service def get_curfew_service(): return game_services.service_manager.curfew_service def get_locale(): client = get_first_client() return", "= None _intern_service = None _terrain_service = None definition_manager = None snippet_manager =", "global _terrain_service if _terrain_service is None: from services.terrain_service import TerrainService _terrain_service = TerrainService()", "None: return current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service def get_photography_service(): return current_zone().photography_service def social_group_cluster_service(): return current_zone().social_group_cluster_service", "_server_clock_service is None: return return _server_clock_service def create_server_clock(initial_ticks): global _server_clock_service import clock _server_clock_service", "= _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker = _zone.get_hide_from_lot_picker with sims4.reload.protected(globals()): tuning_managers = InstanceTuningManagers() get_instance_manager = tuning_managers.__getitem__", "= DistributorService() _intern_service = InternService() init_critical_services = [server_clock_service(), get_persistence_service()] services = [_distributor_service, _intern_service,", "weather_service(): return getattr(game_services.service_manager, 'weather_service', None) def season_service(): return getattr(game_services.service_manager, 'season_service', None) def lot_decoration_service():", "is not None: return zone.daycare_service def get_adoption_service(): return current_zone().adoption_service def get_laundry_service(): zone =", "def client_manager(): return game_services.service_manager.client_manager def get_first_client(): return 
client_manager().get_first_client() def get_selectable_sims(): return get_first_client().selectable_sims def", "game_services.service_manager.time_service def game_clock_service(): return game_services.service_manager.game_clock def server_clock_service(): if _server_clock_service is None: return return", "False def get_object_leak_tracker(): return _object_leak_tracker def get_zone_manager(): return _zone_manager def current_zone(): if _zone_manager", "= None snippet_manager = None _terrain_object = None _object_leak_tracker = None for definition", "from loading start is {}'.format(time_delta)) logger.info('Time delta from loading start is {}'.format(time_delta)) return", "True return False def get_object_leak_tracker(): return _object_leak_tracker def get_zone_manager(): return _zone_manager def current_zone():", "not None: lot = home_zone.lot if lot is not None: return lot.lot_id def", "None: time_delta = time_stamp else: time_delta = time_stamp - time_delta production_logger.info('Time delta from", "None) def organization_service(): return getattr(game_services.service_manager, 'organization_service', None) def get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service def street_service():", "return game_services.service_manager.style_service def get_tutorial_service(): return game_services.service_manager.tutorial_service def calendar_service(): return current_zone().calendar_service def get_rabbit_hole_service(): return", "venue_service(): return current_zone().venue_service def venue_game_service(): return getattr(game_services.service_manager, 'venue_game_service', None) def zone_spin_up_service(): return current_zone().zone_spin_up_service", "not None: return zone.daycare_service def get_adoption_service(): return current_zone().adoption_service def get_laundry_service(): zone = current_zone()", "def business_service(): bs = game_services.service_manager.business_service return bs def get_terrain_service(): 
global _terrain_service if _terrain_service", "time_delta is None: time_delta = time_stamp else: time_delta = time_stamp - time_delta production_logger.info('Time", "_object_leak_tracker _zone_manager.shutdown() _zone_manager = None tuning_managers.clear() _account_service = None _event_manager = None _server_clock_service", "_zone_manager is not None: return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot(): zone = current_zone() if", "None: return zone.travel_group_manager return return _zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None): if household_id: return get_utilities_manager_by_household_id(household_id) return", "zone is not None: return zone.inventory_manager return return _zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None): if zone_id", "active_household_lot_id(): household = active_household() if household is not None: home_zone = get_zone(household.home_zone_id) if", "production_logger.info('GC enabled') gc_collection_enable = True time_stamp = time.time() production_logger.info('TimeStampService start at {}'.format(time_stamp)) logger.info('TimeStampService", "'laundry_service'): return zone.laundry_service def get_object_routing_service(): zone = current_zone() if zone is not None", "_terrain_service = None _distributor_service = None _intern_service = None if _object_leak_tracker is not", "return current_zone().n_p_c_hosted_situation_service def ensemble_service(): return current_zone().ensemble_service def sim_filter_service(zone_id=None): if zone_id is None: return", "import DistributorService from intern_service import InternService from server.account_service import AccountService from services.persistence_service import", "household is not None: home_zone = get_zone(household.home_zone_id) if home_zone is not None: lot", "_zone.get_building_type get_eco_footprint_value = _zone.get_eco_footprint_value get_rent = _zone.get_rent 
get_lot_description_id = _zone.get_lot_description_id get_world_description_id = _zone.get_world_description_id", "lot = active_lot() if lot is not None: return lot.lot_id def client_object_managers(): if", "= venue_service() if service is not None: return service.active_venue def get_intern_service(): return _intern_service", "from services.tuning_managers import InstanceTuningManagers from sims4.resources import INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager import TuningInstanceManager from", "def current_zone_info(): zone = current_zone() return zone.get_zone_info() def current_region(): zone = current_zone() if", "_zone_manager.get(zone_id) if zone is not None: return zone.object_manager def inventory_manager(zone_id=None): if zone_id is", "None if _object_leak_tracker is not None: _object_leak_tracker = None def create_object_leak_tracker(start=False): global _object_leak_tracker", "is not None: return zone.inventory_manager return return _zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None): if zone_id is", "return lot.lot_id def privacy_service(): return current_zone().privacy_service def autonomy_service(): return current_zone().autonomy_service def get_aging_service(): return", "zone = current_zone() if zone is not None and hasattr(zone, 'object_routing_service'): return zone.object_routing_service", "get_zone_modifier_service(): return current_zone().zone_modifier_service def get_demographics_service(): return current_zone().demographics_service def get_service_npc_service(): return current_zone().service_npc_service def conditional_layer_service():", "'venue_game_service', None) def zone_spin_up_service(): return current_zone().zone_spin_up_service def household_manager(): return game_services.service_manager.household_manager def travel_group_manager(zone_id=None): if", "game_services.service_manager.event_manager_service def get_current_venue(): service = venue_service() if service is not None: return 
service.active_venue", "None: from services.terrain_service import TerrainService _terrain_service = TerrainService() return _terrain_service def call_to_action_service(): return", "= None _intern_service = None if _object_leak_tracker is not None: _object_leak_tracker = None", "INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service]) sims4.core_services.start_services(init_critical_services, services) def stop_global_services():", "multi_part_condition_list(): return current_zone().multi_part_condition_list def get_event_manager(): return game_services.service_manager.event_manager_service def get_current_venue(): service = venue_service() if", "return current_zone().story_progression_service def daycare_service(): zone = current_zone() if zone is not None: return", "= None _distributor_service = None _intern_service = None if _object_leak_tracker is not None:", "performance.object_leak_tracker import ObjectLeakTracker if _object_leak_tracker is None: _object_leak_tracker = ObjectLeakTracker() if start: _object_leak_tracker.start_tracking()", "def get_world_id(*_, **__): pass @staticmethod def get_world_and_lot_description_id_from_zone_id(*_, **__): pass @staticmethod def get_is_eco_footprint_compatible_for_world_description(*_, **__):", "is not None: return game_services.service_manager.client_object_managers return () def sim_info_manager(): return game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None):", "def owning_household_of_active_lot(): zone = current_zone() if zone is not None: return household_manager().get(zone.lot.owner_household_id) def", "hasattr(zone, 'laundry_service'): return zone.laundry_service def get_object_routing_service(): zone = 
current_zone() if zone is not", "_intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service]) sims4.core_services.start_services(init_critical_services, services) def stop_global_services(): global _zone_manager, _account_service, _event_manager, _server_clock_service,", "trend_service(): return game_services.service_manager.trend_service def time_service(): return game_services.service_manager.time_service def game_clock_service(): return game_services.service_manager.game_clock def server_clock_service():", "current_zone() if zone is not None: return zone.inventory_manager return return _zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None):", "def client_object_managers(): if game_services.service_manager is not None: return game_services.service_manager.client_object_managers return () def sim_info_manager():", "InstanceTuningManagers() get_instance_manager = tuning_managers.__getitem__ _account_service = None _zone_manager = None _server_clock_service = None", "None _terrain_service = None _distributor_service = None _intern_service = None if _object_leak_tracker is", "return game_services.service_manager.game_clock def server_clock_service(): if _server_clock_service is None: return return _server_clock_service def create_server_clock(initial_ticks):", "return game_services.service_manager.trend_service def time_service(): return game_services.service_manager.time_service def game_clock_service(): return game_services.service_manager.game_clock def server_clock_service(): if", "accessor production_logger = sims4.log.ProductionLogger('Services') logger = sims4.log.Logger('Services') time_delta = None gc_collection_enable = True", "zone.prop_manager def social_group_manager(): return current_zone().social_group_manager def client_manager(): return game_services.service_manager.client_manager def get_first_client(): return client_manager().get_first_client()", "parser.parse_known_args() if 
args.python_autoleak: create_object_leak_tracker() _account_service = AccountService() _zone_manager = ZoneManager() _distributor_service = DistributorService()", "def ensemble_service(): return current_zone().ensemble_service def sim_filter_service(zone_id=None): if zone_id is None: return current_zone().sim_filter_service return", "get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service(): return current_zone().ui_dialog_service def config_service(): return game_services.service_manager.config_service def travel_service():", "_persistence_service if _persistence_service is None: from services.persistence_service import PersistenceService _persistence_service = PersistenceService() return", "_zone.is_event_enabled get_building_type = _zone.get_building_type get_eco_footprint_value = _zone.get_eco_footprint_value get_rent = _zone.get_rent get_lot_description_id = _zone.get_lot_description_id", "DistributorService from intern_service import InternService from server.account_service import AccountService from services.persistence_service import PersistenceService", "current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service def get_photography_service(): return current_zone().photography_service def social_group_cluster_service(): return current_zone().social_group_cluster_service def on_client_connect(client):", "def get_event_manager(): return game_services.service_manager.event_manager_service def get_current_venue(): service = venue_service() if service is not", "not None: if require_active_household and not travel_group.is_active_sim_in_travel_group: return else: return travel_group.object_preference_tracker def get_active_sim():", "def travel_group_manager(zone_id=None): if zone_id is None: zone = current_zone() if zone is not", "current_zone().master_controller def get_persistence_service(): global _persistence_service if 
_persistence_service is None: from services.persistence_service import PersistenceService", "lot.lot_id def client_object_managers(): if game_services.service_manager is not None: return game_services.service_manager.client_object_managers return () def", "= InstanceTuningManagers() get_instance_manager = tuning_managers.__getitem__ _account_service = None _zone_manager = None _server_clock_service =", "return game_services.service_manager.aging_service def get_cheat_service(): return game_services.service_manager.cheat_service def neighborhood_population_service(): return current_zone().neighborhood_population_service def get_reset_and_delete_service(): return", "return current_zone().venue_service def venue_game_service(): return getattr(game_services.service_manager, 'venue_game_service', None) def zone_spin_up_service(): return current_zone().zone_spin_up_service def", "game_clock_service(): return game_services.service_manager.game_clock def server_clock_service(): if _server_clock_service is None: return return _server_clock_service def", "ObjectLeakTracker() if start: _object_leak_tracker.start_tracking() return True return False def get_object_leak_tracker(): return _object_leak_tracker def", "return service.active_venue def get_intern_service(): return _intern_service def get_zone_situation_manager(zone_id=None): if zone_id is None: return", "if zone is not None: return zone.prop_manager def social_group_manager(): return current_zone().social_group_manager def client_manager():", "return zone.inventory_manager return return _zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None): if zone_id is None: zone =", "return travel_group.object_preference_tracker def get_active_sim(): client = client_manager().get_first_client() if client is not None: return", "game_services.service_manager.game_clock def server_clock_service(): if _server_clock_service is None: return return _server_clock_service def create_server_clock(initial_ticks): 
global", "_server_clock_service, _persistence_service, _terrain_service, _distributor_service, _intern_service, _object_leak_tracker _zone_manager.shutdown() _zone_manager = None tuning_managers.clear() _account_service =", "current_zone() if zone is not None and hasattr(zone, 'laundry_service'): return zone.laundry_service def get_object_routing_service():", "loading start is {}'.format(time_delta)) return True def start_global_services(initial_ticks): global _account_service, _zone_manager, _distributor_service, _intern_service", "current_zone().demographics_service def get_service_npc_service(): return current_zone().service_npc_service def conditional_layer_service(): return current_zone().conditional_layer_service def get_sickness_service(): return game_services.service_manager.sickness_service", "_object_leak_tracker = ObjectLeakTracker() if start: _object_leak_tracker.start_tracking() return True return False def get_object_leak_tracker(): return", "'holiday_service', None) def global_policy_service(): return getattr(game_services.service_manager, 'global_policy_service', None) def narrative_service(): return getattr(game_services.service_manager, 'narrative_service',", "= current_zone() if zone is not None: return zone.travel_group_manager return return _zone_manager.get(zone_id).travel_group_manager def", "'weather_service', None) def season_service(): return getattr(game_services.service_manager, 'season_service', None) def lot_decoration_service(): return getattr(game_services.service_manager, 'lot_decoration_service',", "delta from loading start is {}'.format(time_delta)) logger.info('Time delta from loading start is {}'.format(time_delta))", "_intern_service = None _terrain_service = None definition_manager = None snippet_manager = None _terrain_object", "= None _terrain_object = None _object_leak_tracker = None for definition in INSTANCE_TUNING_DEFINITIONS: accessor_name", "def zone_spin_up_service(): return current_zone().zone_spin_up_service def 
household_manager(): return game_services.service_manager.household_manager def travel_group_manager(zone_id=None): if zone_id is", "home_zone = get_zone(household.home_zone_id) if home_zone is not None: lot = home_zone.lot if lot", "create_object_leak_tracker() _account_service = AccountService() _zone_manager = ZoneManager() _distributor_service = DistributorService() _intern_service = InternService()", "production_logger.info('TimeStampService start at {}'.format(time_stamp)) logger.info('TimeStampService start at {}'.format(time_stamp)) if time_delta is None: time_delta", "return game_services.service_manager.client_manager def get_first_client(): return client_manager().get_first_client() def get_selectable_sims(): return get_first_client().selectable_sims def owning_household_id_of_active_lot(): zone", "None) def zone_spin_up_service(): return current_zone().zone_spin_up_service def household_manager(): return game_services.service_manager.household_manager def travel_group_manager(zone_id=None): if zone_id", "None: return zone.region def current_street(): zone = current_zone() if zone is not None:", "= client_manager().get_first_client() if client is not None: return client.household def active_household_id(): client =", "{}'.format(time_delta)) logger.info('Time delta from loading start is {}'.format(time_delta)) return True def start_global_services(initial_ticks): global", "from sims4.tuning.serialization import FinalizeTuningService from zone_manager import ZoneManager parser = argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False,", "None: if require_active_household and not household.is_active_household: return return household.object_preference_tracker travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id) if", "if client is not None: return client.active_sim def active_sim_info(): client = client_manager().get_first_client() if", "def privacy_service(): return current_zone().privacy_service def 
autonomy_service(): return current_zone().autonomy_service def get_aging_service(): return game_services.service_manager.aging_service def", "= None definition_manager = None snippet_manager = None _terrain_object = None _object_leak_tracker =", "_intern_service create_server_clock(initial_ticks) from distributor.distributor_service import DistributorService from intern_service import InternService from server.account_service import", "is not None: return zone.street def get_zone(zone_id, allow_uninstantiated_zones=False): if _zone_manager is not None:", "current_zone() if zone is not None: return zone.region def current_street(): zone = current_zone()", "_terrain_service = None definition_manager = None snippet_manager = None _terrain_object = None _object_leak_tracker", "sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu(): pass", "return game_services.service_manager.time_service def game_clock_service(): return game_services.service_manager.game_clock def server_clock_service(): if _server_clock_service is None: return", "= _zone.get_lot_description_id get_world_description_id = _zone.get_world_description_id get_world_id = _zone.get_world_id get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description =", "posture_graph_service(zone_id=None): if zone_id is None: zone = current_zone() if zone is not None:", "= _zone.invite_sims_to_zone get_house_description_id = _zone.get_house_description_id is_event_enabled = _zone.is_event_enabled get_building_type = 
_zone.get_building_type get_eco_footprint_value =", "= clock.ServerClock(ticks=initial_ticks) def get_master_controller(): return current_zone().master_controller def get_persistence_service(): global _persistence_service if _persistence_service is", "return get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service(): return", "intern_service import InternService from server.account_service import AccountService from services.persistence_service import PersistenceService from services.terrain_service", "current_zone() if zone is not None: return zone.posture_graph_service return return _zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None):", "return zone.object_routing_service def get_landlord_service(): return getattr(game_services.service_manager, 'landlord_service', None) def get_roommate_service(): return getattr(game_services.service_manager, 'roommate_service',", "def get_object_leak_tracker(): return _object_leak_tracker def get_zone_manager(): return _zone_manager def current_zone(): if _zone_manager is", "sims4.reload.protected(globals()): tuning_managers = InstanceTuningManagers() get_instance_manager = tuning_managers.__getitem__ _account_service = None _zone_manager = None", "= None _persistence_service = None _distributor_service = None _intern_service = None _terrain_service =", "def on_enter_main_menu(): pass def account_service(): return _account_service def business_service(): bs = game_services.service_manager.business_service return", "is not None: return zone.travel_group_manager return return _zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None): if household_id: return", 
"invite_sims_to_zone(*_, **__): pass @staticmethod def get_house_description_id(*_, **__): pass @staticmethod def get_building_type(*_, **__): return", "current_zone().fire_service def get_career_service(): return current_zone().career_service def get_story_progression_service(): return current_zone().story_progression_service def daycare_service(): zone =", "if zone_id is None: return current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service def locator_manager(): return current_zone().locator_manager def", "logger = sims4.log.Logger('Services') time_delta = None gc_collection_enable = True class TimeStampService(sims4.service_manager.Service): def start(self):", "game_services.service_manager.business_service return bs def get_terrain_service(): global _terrain_service if _terrain_service is None: from services.terrain_service", "is not None: return zone.posture_graph_service return return _zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None): if zone_id is", "definition_manager = None snippet_manager = None _terrain_object = None _object_leak_tracker = None for", "= client_manager().get_first_client() if client is not None: return client.active_sim_info def active_household(): client =", "def get_fire_service(): return current_zone().fire_service def get_career_service(): return current_zone().career_service def get_story_progression_service(): return current_zone().story_progression_service def", "@staticmethod def get_building_type(*_, **__): return 0 @staticmethod def get_eco_footprint_value(*_, **__): return 0 @staticmethod", "zone is not None: household = household_manager().get(zone.lot.owner_household_id) if household is not None: if", "return current_zone().autonomy_service def get_aging_service(): return game_services.service_manager.aging_service def get_cheat_service(): return game_services.service_manager.cheat_service def neighborhood_population_service(): return", 
"get_lot_description_id = _zone.get_lot_description_id get_world_description_id = _zone.get_world_description_id get_world_id = _zone.get_world_id get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description", "zone_id is None: zone = current_zone() if zone is not None: return zone.travel_group_manager", "zone_id is None: return current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service def get_photography_service(): return current_zone().photography_service def social_group_cluster_service():", "def game_clock_service(): return game_services.service_manager.game_clock def server_clock_service(): if _server_clock_service is None: return return _server_clock_service", "client.household def active_household_id(): client = client_manager().get_first_client() if client is not None: return client.household_id", "def active_household_lot_id(): household = active_household() if household is not None: home_zone = get_zone(household.home_zone_id)", "_distributor_service, _intern_service create_server_clock(initial_ticks) from distributor.distributor_service import DistributorService from intern_service import InternService from server.account_service", "None: zone = current_zone() if zone is not None: return zone.inventory_manager return return", "return current_zone().zone_spin_up_service def household_manager(): return game_services.service_manager.household_manager def travel_group_manager(zone_id=None): if zone_id is None: zone", "return current_zone().plex_service def get_door_service(): return current_zone().door_service def get_zone_modifier_service(): return current_zone().zone_modifier_service def get_demographics_service(): return", "instantiated_tuning_managers = [] for definition in INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) 
services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(),", "None: zone = current_zone() if zone is not None: return zone.travel_group_manager return return", "services.persistence_service import PersistenceService from services.terrain_service import TerrainService from sims4.tuning.serialization import FinalizeTuningService from zone_manager", "def get_rent(*_, **__): return 0 @staticmethod def get_lot_description_id(*_, **__): pass @staticmethod def get_world_description_id(*_,", "= [_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService] instantiated_tuning_managers = [] for definition in INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE])", "accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name] = accessor production_logger = sims4.log.ProductionLogger('Services') logger = sims4.log.Logger('Services')", "get_event_manager(): return game_services.service_manager.event_manager_service def get_current_venue(): service = venue_service() if service is not None:", "def get_story_progression_service(): return current_zone().story_progression_service def daycare_service(): zone = current_zone() if zone is not", "return return _zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None): if zone_id is None: zone = current_zone() else:", "home_zone.lot if lot is not None: return lot.lot_id def privacy_service(): return current_zone().privacy_service def", "game_services.service_manager.sickness_service def get_curfew_service(): return game_services.service_manager.curfew_service def get_locale(): client = get_first_client() return client.account.locale def", "return _zone_manager.get(zone_id).situation_manager def npc_hosted_situation_service(): return 
current_zone().n_p_c_hosted_situation_service def ensemble_service(): return current_zone().ensemble_service def sim_filter_service(zone_id=None): if", "if _zone_manager is not None: return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot(): zone = current_zone()", "client.active_sim_info def active_household(): client = client_manager().get_first_client() if client is not None: return client.household", "is not None: if require_active_household and not household.is_active_household: return return household.object_preference_tracker travel_group =", "zone is not None: return household_manager().get(zone.lot.owner_household_id) def object_preference_tracker(require_active_household=False): zone = current_zone() if zone", "None _persistence_service = None _distributor_service = None _intern_service = None _terrain_service = None", "args.python_autoleak: create_object_leak_tracker() _account_service = AccountService() _zone_manager = ZoneManager() _distributor_service = DistributorService() _intern_service =", "True class TimeStampService(sims4.service_manager.Service): def start(self): global gc_collection_enable, time_delta if gc_collection_enable: gc.disable() production_logger.info('GC disabled')", "object_manager(zone_id=None): if zone_id is None: zone = current_zone() else: zone = _zone_manager.get(zone_id) if", "get_story_progression_service(): return current_zone().story_progression_service def daycare_service(): zone = current_zone() if zone is not None:", "import game_services import paths import sims4.reload import sims4.service_manager try: import _zone except ImportError:", "if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu(): pass def account_service(): return _account_service def business_service():", "return _server_clock_service def 
create_server_clock(initial_ticks): global _server_clock_service import clock _server_clock_service = clock.ServerClock(ticks=initial_ticks) def get_master_controller():", "travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group is not None: if require_active_household and not travel_group.is_active_sim_in_travel_group: return else:", "Tunable, TunableReference import game_services import paths import sims4.reload import sims4.service_manager try: import _zone", "at {}'.format(time_stamp)) if time_delta is None: time_delta = time_stamp else: time_delta = time_stamp", "None: return client.household_id def active_household_lot_id(): household = active_household() if household is not None:", "= tuning_managers.__getitem__ _account_service = None _zone_manager = None _server_clock_service = None _persistence_service =", "= sims4.log.ProductionLogger('Services') logger = sims4.log.Logger('Services') time_delta = None gc_collection_enable = True class TimeStampService(sims4.service_manager.Service):", "stop_global_services(): global _zone_manager, _account_service, _event_manager, _server_clock_service, _persistence_service, _terrain_service, _distributor_service, _intern_service, _object_leak_tracker _zone_manager.shutdown() _zone_manager", "household_manager().get(zone.lot.owner_household_id) if household is not None: if require_active_household and not household.is_active_household: return return", "def get_service_npc_service(): return current_zone().service_npc_service def conditional_layer_service(): return current_zone().conditional_layer_service def get_sickness_service(): return game_services.service_manager.sickness_service def", "FinalizeTuningService from zone_manager import ZoneManager parser = argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False, action='store_true') (args, unused_args)", "business_service(): bs = game_services.service_manager.business_service return bs def get_terrain_service(): global 
_terrain_service if _terrain_service is", "is not None: home_zone = get_zone(household.home_zone_id) if home_zone is not None: lot =", "def get_cheat_service(): return game_services.service_manager.cheat_service def neighborhood_population_service(): return current_zone().neighborhood_population_service def get_reset_and_delete_service(): return current_zone().reset_and_delete_service def", "gc_collection_enable = False else: gc.enable() production_logger.info('GC enabled') gc_collection_enable = True time_stamp = time.time()", "= active_household() if household is not None: home_zone = get_zone(household.home_zone_id) if home_zone is", "current_zone().service_npc_service def conditional_layer_service(): return current_zone().conditional_layer_service def get_sickness_service(): return game_services.service_manager.sickness_service def get_curfew_service(): return game_services.service_manager.curfew_service", "None and hasattr(zone, 'laundry_service'): return zone.laundry_service def get_object_routing_service(): zone = current_zone() if zone", "'lot_decoration_service', None) def get_style_service(): return game_services.service_manager.style_service def get_tutorial_service(): return game_services.service_manager.tutorial_service def calendar_service(): return", "not None: home_zone = get_zone(household.home_zone_id) if home_zone is not None: lot = home_zone.lot", "prop_manager(zone_id=None): if zone_id is None: zone = current_zone() else: zone = _zone_manager.get(zone_id) if", "current_zone().venue_service def venue_game_service(): return getattr(game_services.service_manager, 'venue_game_service', None) def zone_spin_up_service(): return current_zone().zone_spin_up_service def household_manager():", "argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False, action='store_true') (args, unused_args) = parser.parse_known_args() if args.python_autoleak: create_object_leak_tracker() _account_service =", "not None: return 
sims4.zone_utils.zone_id def current_zone_info(): zone = current_zone() return zone.get_zone_info() def current_region():", "_distributor_service = None _intern_service = None _terrain_service = None definition_manager = None snippet_manager", "return True def start_global_services(initial_ticks): global _account_service, _zone_manager, _distributor_service, _intern_service create_server_clock(initial_ticks) from distributor.distributor_service import", "'object_routing_service'): return zone.object_routing_service def get_landlord_service(): return getattr(game_services.service_manager, 'landlord_service', None) def get_roommate_service(): return getattr(game_services.service_manager,", "client_manager(): return game_services.service_manager.client_manager def get_first_client(): return client_manager().get_first_client() def get_selectable_sims(): return get_first_client().selectable_sims def owning_household_id_of_active_lot():", "def get_lot_description_id(*_, **__): pass @staticmethod def get_world_description_id(*_, **__): pass @staticmethod def get_world_id(*_, **__):", "def get_demographics_service(): return current_zone().demographics_service def get_service_npc_service(): return current_zone().service_npc_service def conditional_layer_service(): return current_zone().conditional_layer_service def", "<gh_stars>1-10 import argparse import functools import gc import time from services.tuning_managers import InstanceTuningManagers", "_server_clock_service = None _persistence_service = None _distributor_service = None _intern_service = None _terrain_service", "None) def get_club_service(): return getattr(game_services.service_manager, 'club_service', None) def get_culling_service(): return current_zone().culling_service def get_gardening_service():", "is None: zone = current_zone() if zone is not None: return zone.travel_group_manager return", "def active_sim_info(): client = client_manager().get_first_client() if client is not None: return 
client.active_sim_info def", "if zone is not None: return zone.object_manager def inventory_manager(zone_id=None): if zone_id is None:", "household_id: return get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id)", "zone = current_zone() if zone is not None: return household_manager().get(zone.lot.owner_household_id) def object_preference_tracker(require_active_household=False): zone", "None) def global_policy_service(): return getattr(game_services.service_manager, 'global_policy_service', None) def narrative_service(): return getattr(game_services.service_manager, 'narrative_service', None)", "except ImportError: class _zone: @staticmethod def invite_sims_to_zone(*_, **__): pass @staticmethod def get_house_description_id(*_, **__):", "_persistence_service = None _distributor_service = None _intern_service = None _terrain_service = None definition_manager", "if _terrain_service is None: from services.terrain_service import TerrainService _terrain_service = TerrainService() return _terrain_service", "@staticmethod def get_world_description_id(*_, **__): pass @staticmethod def get_world_id(*_, **__): pass @staticmethod def get_world_and_lot_description_id_from_zone_id(*_,", "get_zone(zone_id, allow_uninstantiated_zones=False): if _zone_manager is not None: return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot(): zone", "if zone is not None and hasattr(zone, 'laundry_service'): return zone.laundry_service def get_object_routing_service(): zone", "None: return current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service def locator_manager(): return 
current_zone().locator_manager def object_manager(zone_id=None): if zone_id", "start: _object_leak_tracker.start_tracking() return True return False def get_object_leak_tracker(): return _object_leak_tracker def get_zone_manager(): return", "'narrative_service', None) def organization_service(): return getattr(game_services.service_manager, 'organization_service', None) def get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service def", "_zone.get_rent get_lot_description_id = _zone.get_lot_description_id get_world_description_id = _zone.get_world_description_id get_world_id = _zone.get_world_id get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id", "= None _server_clock_service = None _persistence_service = None _terrain_service = None _distributor_service =", "return zone.posture_graph_service return return _zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None): if zone_id is None: return current_zone().sim_spawner_service", "= _zone.is_event_enabled get_building_type = _zone.get_building_type get_eco_footprint_value = _zone.get_eco_footprint_value get_rent = _zone.get_rent get_lot_description_id =", "get_selectable_sims(): return get_first_client().selectable_sims def owning_household_id_of_active_lot(): zone = current_zone() if zone is not None:", "import sims4.reload import sims4.service_manager try: import _zone except ImportError: class _zone: @staticmethod def", "def npc_hosted_situation_service(): return current_zone().n_p_c_hosted_situation_service def ensemble_service(): return current_zone().ensemble_service def sim_filter_service(zone_id=None): if zone_id is", "= ZoneManager() _distributor_service = DistributorService() _intern_service = InternService() init_critical_services = [server_clock_service(), get_persistence_service()] services", "True def start_global_services(initial_ticks): global _account_service, _zone_manager, 
_distributor_service, _intern_service create_server_clock(initial_ticks) from distributor.distributor_service import DistributorService", "production_logger.info('Time delta from loading start is {}'.format(time_delta)) logger.info('Time delta from loading start is", "instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service]) sims4.core_services.start_services(init_critical_services, services) def stop_global_services(): global", "= False else: gc.enable() production_logger.info('GC enabled') gc_collection_enable = True time_stamp = time.time() production_logger.info('TimeStampService", "game_services.service_manager is not None: return game_services.service_manager.client_object_managers return () def sim_info_manager(): return game_services.service_manager.sim_info_manager def", "def object_manager(zone_id=None): if zone_id is None: zone = current_zone() else: zone = _zone_manager.get(zone_id)", "current_zone().adoption_service def get_laundry_service(): zone = current_zone() if zone is not None and hasattr(zone,", "get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service def street_service(): return getattr(game_services.service_manager, 'street_service', None) def c_api_gsi_dump(): import server_commands.developer_commands", "'global_policy_service', None) def narrative_service(): return getattr(game_services.service_manager, 'narrative_service', None) def organization_service(): return getattr(game_services.service_manager, 'organization_service',", "def neighborhood_population_service(): return current_zone().neighborhood_population_service def get_reset_and_delete_service(): return current_zone().reset_and_delete_service def venue_service(): return 
current_zone().venue_service def", "time_delta production_logger.info('Time delta from loading start is {}'.format(time_delta)) logger.info('Time delta from loading start", "DistributorService() _intern_service = InternService() init_critical_services = [server_clock_service(), get_persistence_service()] services = [_distributor_service, _intern_service, _intern_service.get_start_interning(),", "else: zone = _zone_manager.get(zone_id) if zone is not None: return zone.prop_manager def social_group_manager():", "def single_part_condition_list(): return current_zone().single_part_condition_list def multi_part_condition_list(): return current_zone().multi_part_condition_list def get_event_manager(): return game_services.service_manager.event_manager_service def", "def get_laundry_service(): zone = current_zone() if zone is not None and hasattr(zone, 'laundry_service'):", "0 @staticmethod def get_lot_description_id(*_, **__): pass @staticmethod def get_world_description_id(*_, **__): pass @staticmethod def", "getattr(game_services.service_manager, 'organization_service', None) def get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service def street_service(): return getattr(game_services.service_manager, 'street_service', None)", "is not None: return zone.prop_manager def social_group_manager(): return current_zone().social_group_manager def client_manager(): return game_services.service_manager.client_manager", "get_world_id = _zone.get_world_id get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker = _zone.get_hide_from_lot_picker with", "services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service]) 
sims4.core_services.start_services(init_critical_services, services) def stop_global_services(): global _zone_manager, _account_service,", "get_current_venue(): service = venue_service() if service is not None: return service.active_venue def get_intern_service():", "zone.object_routing_service def get_landlord_service(): return getattr(game_services.service_manager, 'landlord_service', None) def get_roommate_service(): return getattr(game_services.service_manager, 'roommate_service', None)", "_zone_manager, _account_service, _event_manager, _server_clock_service, _persistence_service, _terrain_service, _distributor_service, _intern_service, _object_leak_tracker _zone_manager.shutdown() _zone_manager = None", "if zone is not None: return household_manager().get(zone.lot.owner_household_id) def object_preference_tracker(require_active_household=False): zone = current_zone() if", "lot is not None: return lot.lot_id def client_object_managers(): if game_services.service_manager is not None:", "= current_zone() if zone is not None: return zone.daycare_service def get_adoption_service(): return current_zone().adoption_service", "return current_zone().ensemble_service def sim_filter_service(zone_id=None): if zone_id is None: return current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service def", "**__): return 0 @staticmethod def get_rent(*_, **__): return 0 @staticmethod def get_lot_description_id(*_, **__):", "current_zone().neighborhood_population_service def get_reset_and_delete_service(): return current_zone().reset_and_delete_service def venue_service(): return current_zone().venue_service def venue_game_service(): return getattr(game_services.service_manager,", "zone is not None: return zone.object_manager def inventory_manager(zone_id=None): if zone_id is None: zone", "on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client) 
current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu():", "() def sim_info_manager(): return game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None): if zone_id is None: zone =", "AccountService() _zone_manager = ZoneManager() _distributor_service = DistributorService() _intern_service = InternService() init_critical_services = [server_clock_service(),", "get_gardening_service(): return current_zone().gardening_service def drama_scheduler_service(): return current_zone().drama_schedule_service def get_plex_service(): return current_zone().plex_service def get_door_service():", "global _object_leak_tracker from performance.object_leak_tracker import ObjectLeakTracker if _object_leak_tracker is None: _object_leak_tracker = ObjectLeakTracker()", "= current_zone() if zone is not None and hasattr(zone, 'object_routing_service'): return zone.object_routing_service def", "disabled') gc_collection_enable = False else: gc.enable() production_logger.info('GC enabled') gc_collection_enable = True time_stamp =", "services.persistence_service import PersistenceService _persistence_service = PersistenceService() return _persistence_service def get_distributor_service(): return _distributor_service def", "current_zone() if zone is not None: return zone.lot def active_lot_id(): lot = active_lot()", "return current_zone().social_group_manager def client_manager(): return game_services.service_manager.client_manager def get_first_client(): return client_manager().get_first_client() def get_selectable_sims(): return", "global _zone_manager, _account_service, _event_manager, _server_clock_service, _persistence_service, _terrain_service, _distributor_service, 
_intern_service, _object_leak_tracker _zone_manager.shutdown() _zone_manager =", "return current_zone().master_controller def get_persistence_service(): global _persistence_service if _persistence_service is None: from services.persistence_service import", "return current_zone().reset_and_delete_service def venue_service(): return current_zone().venue_service def venue_game_service(): return getattr(game_services.service_manager, 'venue_game_service', None) def", "= current_zone() if zone is not None: return household_manager().get(zone.lot.owner_household_id) def object_preference_tracker(require_active_household=False): zone =", "gc import time from services.tuning_managers import InstanceTuningManagers from sims4.resources import INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager", "zone is not None and hasattr(zone, 'laundry_service'): return zone.laundry_service def get_object_routing_service(): zone =", "PersistenceService() return _persistence_service def get_distributor_service(): return _distributor_service def get_fire_service(): return current_zone().fire_service def get_career_service():", "game_services.service_manager.rabbit_hole_service def holiday_service(): return getattr(game_services.service_manager, 'holiday_service', None) def global_policy_service(): return getattr(game_services.service_manager, 'global_policy_service', None)", "= None if _object_leak_tracker is not None: _object_leak_tracker = None def create_object_leak_tracker(start=False): global", "return getattr(game_services.service_manager, 'roommate_service', None) def get_club_service(): return getattr(game_services.service_manager, 'club_service', None) def get_culling_service(): return", "INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager import TuningInstanceManager from sims4.tuning.tunable import Tunable, TunableReference import game_services import", "_terrain_service is None: from services.terrain_service import TerrainService _terrain_service = 
TerrainService() return _terrain_service def", "zone_id is None: zone = current_zone() else: zone = _zone_manager.get(zone_id) if zone is", "is {}'.format(time_delta)) logger.info('Time delta from loading start is {}'.format(time_delta)) return True def start_global_services(initial_ticks):", "def get_zone_situation_manager(zone_id=None): if zone_id is None: return current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager def npc_hosted_situation_service(): return", "game_services import paths import sims4.reload import sims4.service_manager try: import _zone except ImportError: class", "drama_scheduler_service(): return current_zone().drama_schedule_service def get_plex_service(): return current_zone().plex_service def get_door_service(): return current_zone().door_service def get_zone_modifier_service():", "get_master_controller(): return current_zone().master_controller def get_persistence_service(): global _persistence_service if _persistence_service is None: from services.persistence_service", "[] for definition in INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service])", "gc.disable() production_logger.info('GC disabled') gc_collection_enable = False else: gc.enable() production_logger.info('GC enabled') gc_collection_enable = True", "time_stamp - time_delta production_logger.info('Time delta from loading start is {}'.format(time_delta)) logger.info('Time delta from", "organization_service(): return getattr(game_services.service_manager, 'organization_service', None) def get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service def street_service(): return getattr(game_services.service_manager,", 
"_zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None): if zone_id is None: zone = current_zone() else: zone =", "None _terrain_object = None _object_leak_tracker = None for definition in INSTANCE_TUNING_DEFINITIONS: accessor_name =", "current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service def locator_manager(): return current_zone().locator_manager def object_manager(zone_id=None): if zone_id is None:", "return client_manager().get_first_client() def get_selectable_sims(): return get_first_client().selectable_sims def owning_household_id_of_active_lot(): zone = current_zone() if zone", "_zone_manager.get(zone_id).sim_filter_service def get_photography_service(): return current_zone().photography_service def social_group_cluster_service(): return current_zone().social_group_cluster_service def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client)", "_terrain_service, _distributor_service, _intern_service, _object_leak_tracker _zone_manager.shutdown() _zone_manager = None tuning_managers.clear() _account_service = None _event_manager", "def get_photography_service(): return current_zone().photography_service def social_group_cluster_service(): return current_zone().social_group_cluster_service def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client)", "return return household.object_preference_tracker travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group is not None: if require_active_household", "if client is not None: return client.household def active_household_id(): client = client_manager().get_first_client() if", "lot.lot_id def privacy_service(): return current_zone().privacy_service def autonomy_service(): return 
current_zone().autonomy_service def get_aging_service(): return game_services.service_manager.aging_service", "def create_server_clock(initial_ticks): global _server_clock_service import clock _server_clock_service = clock.ServerClock(ticks=initial_ticks) def get_master_controller(): return current_zone().master_controller", "get_terrain_service(): global _terrain_service if _terrain_service is None: from services.terrain_service import TerrainService _terrain_service =", "None: return current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager def npc_hosted_situation_service(): return current_zone().n_p_c_hosted_situation_service def ensemble_service(): return current_zone().ensemble_service", "def get_career_service(): return current_zone().career_service def get_story_progression_service(): return current_zone().story_progression_service def daycare_service(): zone = current_zone()", "is not None: household = household_manager().get(zone.lot.owner_household_id) if household is not None: if require_active_household", "TerrainService _terrain_service = TerrainService() return _terrain_service def call_to_action_service(): return game_services.service_manager.call_to_action_service def trend_service(): return", "current_zone() if zone is not None: return zone.lot.owner_household_id def owning_household_of_active_lot(): zone = current_zone()", "zone = current_zone() if zone is not None: return zone.lot.owner_household_id def owning_household_of_active_lot(): zone", "getattr(game_services.service_manager, 'landlord_service', None) def get_roommate_service(): return getattr(game_services.service_manager, 'roommate_service', None) def get_club_service(): return getattr(game_services.service_manager,", "zone.get_zone_info() def current_region(): zone = current_zone() if zone is not None: return zone.region", "client is not None: return client.active_sim_info def active_household(): client = client_manager().get_first_client() if client", 
"_zone.get_hide_from_lot_picker with sims4.reload.protected(globals()): tuning_managers = InstanceTuningManagers() get_instance_manager = tuning_managers.__getitem__ _account_service = None _zone_manager", "if require_active_household and not travel_group.is_active_sim_in_travel_group: return else: return travel_group.object_preference_tracker def get_active_sim(): client =", "current_street(): zone = current_zone() if zone is not None: return zone.street def get_zone(zone_id,", "TimeStampService] instantiated_tuning_managers = [] for definition in INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(),", "_distributor_service = DistributorService() _intern_service = InternService() init_critical_services = [server_clock_service(), get_persistence_service()] services = [_distributor_service,", "sims4.log.ProductionLogger('Services') logger = sims4.log.Logger('Services') time_delta = None gc_collection_enable = True class TimeStampService(sims4.service_manager.Service): def", "enabled') gc_collection_enable = True time_stamp = time.time() production_logger.info('TimeStampService start at {}'.format(time_stamp)) logger.info('TimeStampService start", "= current_zone() if zone is not None: return zone.region def current_street(): zone =", "active_household(): client = client_manager().get_first_client() if client is not None: return client.household def active_household_id():", "return bs def get_terrain_service(): global _terrain_service if _terrain_service is None: from services.terrain_service import", "def get_aging_service(): return game_services.service_manager.aging_service def get_cheat_service(): return game_services.service_manager.cheat_service def neighborhood_population_service(): return current_zone().neighborhood_population_service def", 
"def multi_part_condition_list(): return current_zone().multi_part_condition_list def get_event_manager(): return game_services.service_manager.event_manager_service def get_current_venue(): service = venue_service()", "bs def get_terrain_service(): global _terrain_service if _terrain_service is None: from services.terrain_service import TerrainService", "import TerrainService _terrain_service = TerrainService() return _terrain_service def call_to_action_service(): return game_services.service_manager.call_to_action_service def trend_service():", "return household_manager().get(zone.lot.owner_household_id) def object_preference_tracker(require_active_household=False): zone = current_zone() if zone is not None: household", "return client.household def active_household_id(): client = client_manager().get_first_client() if client is not None: return", "getattr(game_services.service_manager, 'venue_game_service', None) def zone_spin_up_service(): return current_zone().zone_spin_up_service def household_manager(): return game_services.service_manager.household_manager def travel_group_manager(zone_id=None):", "time_delta = None gc_collection_enable = True class TimeStampService(sims4.service_manager.Service): def start(self): global gc_collection_enable, time_delta", "None: return zone.daycare_service def get_adoption_service(): return current_zone().adoption_service def get_laundry_service(): zone = current_zone() if", "is None: return current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service def locator_manager(): return current_zone().locator_manager def object_manager(zone_id=None): if", "current_zone().travel_service def sim_quadtree(): return current_zone().sim_quadtree def single_part_condition_list(): return current_zone().single_part_condition_list def multi_part_condition_list(): return current_zone().multi_part_condition_list", "def active_household(): client = client_manager().get_first_client() if client is not None: return 
client.household def", "zone.lot def active_lot_id(): lot = active_lot() if lot is not None: return lot.lot_id", "is not None: return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot(): zone = current_zone() if zone", "time_delta if gc_collection_enable: gc.disable() production_logger.info('GC disabled') gc_collection_enable = False else: gc.enable() production_logger.info('GC enabled')", "return client.active_sim_info def active_household(): client = client_manager().get_first_client() if client is not None: return", "is not None: return client.active_sim def active_sim_info(): client = client_manager().get_first_client() if client is", "is None: zone = current_zone() else: zone = _zone_manager.get(zone_id) if zone is not", "return getattr(game_services.service_manager, 'landlord_service', None) def get_roommate_service(): return getattr(game_services.service_manager, 'roommate_service', None) def get_club_service(): return", "_persistence_service is None: from services.persistence_service import PersistenceService _persistence_service = PersistenceService() return _persistence_service def", "household.is_active_household: return return household.object_preference_tracker travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group is not None: if", "functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name] = accessor production_logger = sims4.log.ProductionLogger('Services') logger = sims4.log.Logger('Services') time_delta =", "return current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service def get_photography_service(): return current_zone().photography_service def social_group_cluster_service(): return current_zone().social_group_cluster_service def", "return _zone_manager.get(zone_id).sim_filter_service def get_photography_service(): return current_zone().photography_service def social_group_cluster_service(): 
return current_zone().social_group_cluster_service def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client)", "zone = current_zone() if zone is not None: return zone.posture_graph_service return return _zone_manager.get(zone_id).posture_graph_service", "logger.info('Time delta from loading start is {}'.format(time_delta)) return True def start_global_services(initial_ticks): global _account_service,", "delta from loading start is {}'.format(time_delta)) return True def start_global_services(initial_ticks): global _account_service, _zone_manager,", "_object_leak_tracker = None def create_object_leak_tracker(start=False): global _object_leak_tracker from performance.object_leak_tracker import ObjectLeakTracker if _object_leak_tracker", "zone is not None and hasattr(zone, 'object_routing_service'): return zone.object_routing_service def get_landlord_service(): return getattr(game_services.service_manager,", "definition.TYPE_ENUM_VALUE) globals()[accessor_name] = accessor production_logger = sims4.log.ProductionLogger('Services') logger = sims4.log.Logger('Services') time_delta = None", "at {}'.format(time_stamp)) logger.info('TimeStampService start at {}'.format(time_stamp)) if time_delta is None: time_delta = time_stamp", "_persistence_service = PersistenceService() return _persistence_service def get_distributor_service(): return _distributor_service def get_fire_service(): return current_zone().fire_service", "def social_group_manager(): return current_zone().social_group_manager def client_manager(): return game_services.service_manager.client_manager def get_first_client(): return client_manager().get_first_client() def", "holiday_service(): return getattr(game_services.service_manager, 'holiday_service', None) def global_policy_service(): return getattr(game_services.service_manager, 'global_policy_service', None) def narrative_service():", "None: return _zone_manager.current_zone def current_zone_id(): if _zone_manager is not 
None: return sims4.zone_utils.zone_id def", "_distributor_service = None _intern_service = None if _object_leak_tracker is not None: _object_leak_tracker =", "_zone.get_world_description_id get_world_id = _zone.get_world_id get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker = _zone.get_hide_from_lot_picker", "= _zone.get_house_description_id is_event_enabled = _zone.is_event_enabled get_building_type = _zone.get_building_type get_eco_footprint_value = _zone.get_eco_footprint_value get_rent =", "def get_distributor_service(): return _distributor_service def get_fire_service(): return current_zone().fire_service def get_career_service(): return current_zone().career_service def", "get_door_service(): return current_zone().door_service def get_zone_modifier_service(): return current_zone().zone_modifier_service def get_demographics_service(): return current_zone().demographics_service def get_service_npc_service():", "accessor_name = definition.manager_name accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name] = accessor production_logger = sims4.log.ProductionLogger('Services')", "def get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service(): return current_zone().ui_dialog_service def config_service(): return game_services.service_manager.config_service def", "= parser.parse_known_args() if args.python_autoleak: create_object_leak_tracker() _account_service = AccountService() _zone_manager = ZoneManager() _distributor_service =", "def owning_household_id_of_active_lot(): zone = current_zone() if zone is not None: return zone.lot.owner_household_id def", "return zone.prop_manager def social_group_manager(): return 
current_zone().social_group_manager def client_manager(): return game_services.service_manager.client_manager def get_first_client(): return", "def locator_manager(): return current_zone().locator_manager def object_manager(zone_id=None): if zone_id is None: zone = current_zone()", "def get_club_service(): return getattr(game_services.service_manager, 'club_service', None) def get_culling_service(): return current_zone().culling_service def get_gardening_service(): return", "is not None and hasattr(zone, 'laundry_service'): return zone.laundry_service def get_object_routing_service(): zone = current_zone()", "return getattr(game_services.service_manager, 'organization_service', None) def get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service def street_service(): return getattr(game_services.service_manager, 'street_service',", "def calendar_service(): return current_zone().calendar_service def get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service def holiday_service(): return getattr(game_services.service_manager, 'holiday_service',", "= _zone.get_hide_from_lot_picker with sims4.reload.protected(globals()): tuning_managers = InstanceTuningManagers() get_instance_manager = tuning_managers.__getitem__ _account_service = None", "def account_service(): return _account_service def business_service(): bs = game_services.service_manager.business_service return bs def get_terrain_service():", "_zone_manager, _account_service]) sims4.core_services.start_services(init_critical_services, services) def stop_global_services(): global _zone_manager, _account_service, _event_manager, _server_clock_service, _persistence_service, _terrain_service,", "household_manager().get(zone.lot.owner_household_id) def object_preference_tracker(require_active_household=False): zone = current_zone() if zone is not None: household =", "not None: return client.household_id def active_household_lot_id(): household = 
active_household() if household is not", "loading start is {}'.format(time_delta)) logger.info('Time delta from loading start is {}'.format(time_delta)) return True", "return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot(): zone = current_zone() if zone is not None:", "account_service(): return _account_service def business_service(): bs = game_services.service_manager.business_service return bs def get_terrain_service(): global", "client = get_first_client() return client.account.locale def relationship_service(): return game_services.service_manager.relationship_service def hidden_sim_service(): return game_services.service_manager.hidden_sim_service", "get_lot_description_id(*_, **__): pass @staticmethod def get_world_description_id(*_, **__): pass @staticmethod def get_world_id(*_, **__): pass", "get_is_eco_footprint_compatible_for_world_description(*_, **__): return False @staticmethod def get_hide_from_lot_picker(*_, **__): pass @staticmethod def is_event_enabled(*_, **__):", "def get_zone(zone_id, allow_uninstantiated_zones=False): if _zone_manager is not None: return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot():", "_zone_manager.shutdown() _zone_manager = None tuning_managers.clear() _account_service = None _event_manager = None _server_clock_service =", "'season_service', None) def lot_decoration_service(): return getattr(game_services.service_manager, 'lot_decoration_service', None) def get_style_service(): return game_services.service_manager.style_service def", "is not None: if require_active_household and not travel_group.is_active_sim_in_travel_group: return else: return travel_group.object_preference_tracker def", "return 0 @staticmethod def get_rent(*_, **__): return 0 @staticmethod def get_lot_description_id(*_, **__): pass", "= [] for definition in INSTANCE_TUNING_DEFINITIONS: 
instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager,", "zone is not None: return zone.travel_group_manager return return _zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None): if household_id:", "return _account_service def business_service(): bs = game_services.service_manager.business_service return bs def get_terrain_service(): global _terrain_service", "zone = _zone_manager.get(zone_id) if zone is not None: return zone.prop_manager def social_group_manager(): return", "= get_zone(household.home_zone_id) if home_zone is not None: lot = home_zone.lot if lot is", "import TuningInstanceManager from sims4.tuning.tunable import Tunable, TunableReference import game_services import paths import sims4.reload", "current_zone().sim_quadtree def single_part_condition_list(): return current_zone().single_part_condition_list def multi_part_condition_list(): return current_zone().multi_part_condition_list def get_event_manager(): return game_services.service_manager.event_manager_service", "_object_leak_tracker.start_tracking() return True return False def get_object_leak_tracker(): return _object_leak_tracker def get_zone_manager(): return _zone_manager", "zone = current_zone() if zone is not None: return zone.lot def active_lot_id(): lot", "return game_services.service_manager.event_manager_service def get_current_venue(): service = venue_service() if service is not None: return", "pass @staticmethod def get_world_description_id(*_, **__): pass @staticmethod def get_world_id(*_, **__): pass @staticmethod def", "from services.terrain_service import TerrainService from sims4.tuning.serialization import FinalizeTuningService from zone_manager import ZoneManager parser", "def utilities_manager(household_id=None): if 
household_id: return get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def", "@staticmethod def get_world_id(*_, **__): pass @staticmethod def get_world_and_lot_description_id_from_zone_id(*_, **__): pass @staticmethod def get_is_eco_footprint_compatible_for_world_description(*_,", "_object_leak_tracker is None: _object_leak_tracker = ObjectLeakTracker() if start: _object_leak_tracker.start_tracking() return True return False", "None: return game_services.service_manager.client_object_managers return () def sim_info_manager(): return game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None): if zone_id", "current_zone_id(): if _zone_manager is not None: return sims4.zone_utils.zone_id def current_zone_info(): zone = current_zone()", "import Tunable, TunableReference import game_services import paths import sims4.reload import sims4.service_manager try: import", "if zone is not None: return zone.posture_graph_service return return _zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None): if", "is not None: return client.active_sim_info def active_household(): client = client_manager().get_first_client() if client is", "game_services.service_manager.style_service def get_tutorial_service(): return game_services.service_manager.tutorial_service def calendar_service(): return current_zone().calendar_service def get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service", "zone = current_zone() if zone is not None: return zone.travel_group_manager return return _zone_manager.get(zone_id).travel_group_manager", "if lot is not None: return lot.lot_id def privacy_service(): return current_zone().privacy_service def autonomy_service():", "if zone is not None: return zone.lot def 
active_lot_id(): lot = active_lot() if", "return game_services.service_manager.household_manager def travel_group_manager(zone_id=None): if zone_id is None: zone = current_zone() if zone", "is None: zone = current_zone() if zone is not None: return zone.inventory_manager return", "snippet_manager = None _terrain_object = None _object_leak_tracker = None for definition in INSTANCE_TUNING_DEFINITIONS:", "is {}'.format(time_delta)) return True def start_global_services(initial_ticks): global _account_service, _zone_manager, _distributor_service, _intern_service create_server_clock(initial_ticks) from", "_zone_manager.current_zone def current_zone_id(): if _zone_manager is not None: return sims4.zone_utils.zone_id def current_zone_info(): zone", "get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service def holiday_service(): return getattr(game_services.service_manager, 'holiday_service', None) def global_policy_service(): return getattr(game_services.service_manager,", "import FinalizeTuningService from zone_manager import ZoneManager parser = argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False, action='store_true') (args,", "import PersistenceService from services.terrain_service import TerrainService from sims4.tuning.serialization import FinalizeTuningService from zone_manager import", "get_career_service(): return current_zone().career_service def get_story_progression_service(): return current_zone().story_progression_service def daycare_service(): zone = current_zone() if", "def current_zone(): if _zone_manager is not None: return _zone_manager.current_zone def current_zone_id(): if _zone_manager", "get_rent(*_, **__): return 0 @staticmethod def get_lot_description_id(*_, **__): pass @staticmethod def get_world_description_id(*_, **__):", "@staticmethod def invite_sims_to_zone(*_, **__): pass @staticmethod def get_house_description_id(*_, **__): pass @staticmethod def get_building_type(*_,", "def 
get_reset_and_delete_service(): return current_zone().reset_and_delete_service def venue_service(): return current_zone().venue_service def venue_game_service(): return getattr(game_services.service_manager, 'venue_game_service',", "if _object_leak_tracker is None: _object_leak_tracker = ObjectLeakTracker() if start: _object_leak_tracker.start_tracking() return True return", "services.terrain_service import TerrainService _terrain_service = TerrainService() return _terrain_service def call_to_action_service(): return game_services.service_manager.call_to_action_service def", "client_manager().get_first_client() if client is not None: return client.household_id def active_household_lot_id(): household = active_household()", "require_active_household and not travel_group.is_active_sim_in_travel_group: return else: return travel_group.object_preference_tracker def get_active_sim(): client = client_manager().get_first_client()", "is not None: return _zone_manager.current_zone def current_zone_id(): if _zone_manager is not None: return", "None: lot = home_zone.lot if lot is not None: return lot.lot_id def privacy_service():", "def ui_dialog_service(): return current_zone().ui_dialog_service def config_service(): return game_services.service_manager.config_service def travel_service(): return current_zone().travel_service def", "_zone_manager = None tuning_managers.clear() _account_service = None _event_manager = None _server_clock_service = None", "= current_zone() if zone is not None: return zone.lot.owner_household_id def owning_household_of_active_lot(): zone =", "current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager def npc_hosted_situation_service(): return current_zone().n_p_c_hosted_situation_service def ensemble_service(): return current_zone().ensemble_service def sim_filter_service(zone_id=None):", "if zone_id is None: return current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager def 
npc_hosted_situation_service(): return current_zone().n_p_c_hosted_situation_service def", "return current_zone().service_npc_service def conditional_layer_service(): return current_zone().conditional_layer_service def get_sickness_service(): return game_services.service_manager.sickness_service def get_curfew_service(): return", "current_zone().photography_service def social_group_cluster_service(): return current_zone().social_group_cluster_service def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client)", "import argparse import functools import gc import time from services.tuning_managers import InstanceTuningManagers from", "return current_zone().fire_service def get_career_service(): return current_zone().career_service def get_story_progression_service(): return current_zone().story_progression_service def daycare_service(): zone", "server.account_service import AccountService from services.persistence_service import PersistenceService from services.terrain_service import TerrainService from sims4.tuning.serialization", "current_zone().zone_spin_up_service def household_manager(): return game_services.service_manager.household_manager def travel_group_manager(zone_id=None): if zone_id is None: zone =", "return getattr(game_services.service_manager, 'global_policy_service', None) def narrative_service(): return getattr(game_services.service_manager, 'narrative_service', None) def organization_service(): return", "create_server_clock(initial_ticks): global _server_clock_service import clock _server_clock_service = clock.ServerClock(ticks=initial_ticks) def get_master_controller(): return current_zone().master_controller def", "_zone_manager.get(zone_id) if zone is not None: return zone.prop_manager def 
social_group_manager(): return current_zone().social_group_manager def", "not None: return client.household def active_household_id(): client = client_manager().get_first_client() if client is not", "return zone.daycare_service def get_adoption_service(): return current_zone().adoption_service def get_laundry_service(): zone = current_zone() if zone", "return getattr(game_services.service_manager, 'season_service', None) def lot_decoration_service(): return getattr(game_services.service_manager, 'lot_decoration_service', None) def get_style_service(): return", "active_household() if household is not None: home_zone = get_zone(household.home_zone_id) if home_zone is not", "return True return False def get_object_leak_tracker(): return _object_leak_tracker def get_zone_manager(): return _zone_manager def", "= current_zone() if zone is not None: return zone.lot def active_lot_id(): lot =", "None: return zone.prop_manager def social_group_manager(): return current_zone().social_group_manager def client_manager(): return game_services.service_manager.client_manager def get_first_client():", "get_hide_from_lot_picker = _zone.get_hide_from_lot_picker with sims4.reload.protected(globals()): tuning_managers = InstanceTuningManagers() get_instance_manager = tuning_managers.__getitem__ _account_service =", "get_zone_situation_manager(zone_id=None): if zone_id is None: return current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager def npc_hosted_situation_service(): return current_zone().n_p_c_hosted_situation_service", "if _object_leak_tracker is not None: _object_leak_tracker = None def create_object_leak_tracker(start=False): global _object_leak_tracker from", "zone_id is None: return current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service def locator_manager(): return current_zone().locator_manager def object_manager(zone_id=None):", "is not None and hasattr(zone, 'object_routing_service'): return 
zone.object_routing_service def get_landlord_service(): return getattr(game_services.service_manager, 'landlord_service',", "None _terrain_service = None definition_manager = None snippet_manager = None _terrain_object = None", "services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service]) sims4.core_services.start_services(init_critical_services, services) def stop_global_services(): global _zone_manager,", "get_locale(): client = get_first_client() return client.account.locale def relationship_service(): return game_services.service_manager.relationship_service def hidden_sim_service(): return", "0 @staticmethod def get_rent(*_, **__): return 0 @staticmethod def get_lot_description_id(*_, **__): pass @staticmethod", "= travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group is not None: if require_active_household and not travel_group.is_active_sim_in_travel_group: return", "None _distributor_service = None _intern_service = None _terrain_service = None definition_manager = None", "def get_selectable_sims(): return get_first_client().selectable_sims def owning_household_id_of_active_lot(): zone = current_zone() if zone is not", "= home_zone.lot if lot is not None: return lot.lot_id def privacy_service(): return current_zone().privacy_service", "current_zone().door_service def get_zone_modifier_service(): return current_zone().zone_modifier_service def get_demographics_service(): return current_zone().demographics_service def get_service_npc_service(): return current_zone().service_npc_service", "_event_manager = None _server_clock_service = None _persistence_service = None _terrain_service = None _distributor_service", "import _zone except ImportError: class _zone: @staticmethod def invite_sims_to_zone(*_, **__): pass @staticmethod def", "def get_building_type(*_, **__): return 0 
@staticmethod def get_eco_footprint_value(*_, **__): return 0 @staticmethod def", "def create_object_leak_tracker(start=False): global _object_leak_tracker from performance.object_leak_tracker import ObjectLeakTracker if _object_leak_tracker is None: _object_leak_tracker", "if household is not None: home_zone = get_zone(household.home_zone_id) if home_zone is not None:", "def hidden_sim_service(): return game_services.service_manager.hidden_sim_service def weather_service(): return getattr(game_services.service_manager, 'weather_service', None) def season_service(): return", "return current_zone().ui_dialog_service def config_service(): return game_services.service_manager.config_service def travel_service(): return current_zone().travel_service def sim_quadtree(): return", "None: return zone.lot def active_lot_id(): lot = active_lot() if lot is not None:", "{}'.format(time_delta)) return True def start_global_services(initial_ticks): global _account_service, _zone_manager, _distributor_service, _intern_service create_server_clock(initial_ticks) from distributor.distributor_service", "_account_service]) sims4.core_services.start_services(init_critical_services, services) def stop_global_services(): global _zone_manager, _account_service, _event_manager, _server_clock_service, _persistence_service, _terrain_service, _distributor_service,", "def stop_global_services(): global _zone_manager, _account_service, _event_manager, _server_clock_service, _persistence_service, _terrain_service, _distributor_service, _intern_service, _object_leak_tracker _zone_manager.shutdown()", "client.household_id def active_household_lot_id(): household = active_household() if household is not None: home_zone =", "def current_region(): zone = current_zone() if zone is not None: return zone.region def", "if zone is not None: return zone.daycare_service def get_adoption_service(): return current_zone().adoption_service def get_laundry_service():", "zone.region def current_street(): zone = 
current_zone() if zone is not None: return zone.street", "return 0 @staticmethod def get_eco_footprint_value(*_, **__): return 0 @staticmethod def get_rent(*_, **__): return", "None _zone_manager = None _server_clock_service = None _persistence_service = None _distributor_service = None", "getattr(game_services.service_manager, 'lot_decoration_service', None) def get_style_service(): return game_services.service_manager.style_service def get_tutorial_service(): return game_services.service_manager.tutorial_service def calendar_service():", "game_services.service_manager.config_service def travel_service(): return current_zone().travel_service def sim_quadtree(): return current_zone().sim_quadtree def single_part_condition_list(): return current_zone().single_part_condition_list", "current_zone() return zone.get_zone_info() def current_region(): zone = current_zone() if zone is not None:", "game_services.service_manager.cheat_service def neighborhood_population_service(): return current_zone().neighborhood_population_service def get_reset_and_delete_service(): return current_zone().reset_and_delete_service def venue_service(): return current_zone().venue_service", "server_clock_service(): if _server_clock_service is None: return return _server_clock_service def create_server_clock(initial_ticks): global _server_clock_service import", "start(self): global gc_collection_enable, time_delta if gc_collection_enable: gc.disable() production_logger.info('GC disabled') gc_collection_enable = False else:", "return current_zone().single_part_condition_list def multi_part_condition_list(): return current_zone().multi_part_condition_list def get_event_manager(): return game_services.service_manager.event_manager_service def get_current_venue(): service", "return sims4.zone_utils.zone_id def current_zone_info(): zone = current_zone() return zone.get_zone_info() def current_region(): zone =", "def start(self): global gc_collection_enable, time_delta if gc_collection_enable: 
gc.disable() production_logger.info('GC disabled') gc_collection_enable = False", "get_laundry_service(): zone = current_zone() if zone is not None and hasattr(zone, 'laundry_service'): return", "return zone.lot.owner_household_id def owning_household_of_active_lot(): zone = current_zone() if zone is not None: return", "from sims4.tuning.instance_manager import TuningInstanceManager from sims4.tuning.tunable import Tunable, TunableReference import game_services import paths", "get_world_and_lot_description_id_from_zone_id(*_, **__): pass @staticmethod def get_is_eco_footprint_compatible_for_world_description(*_, **__): return False @staticmethod def get_hide_from_lot_picker(*_, **__):", "pass invite_sims_to_zone = _zone.invite_sims_to_zone get_house_description_id = _zone.get_house_description_id is_event_enabled = _zone.is_event_enabled get_building_type = _zone.get_building_type", "if client is not None: return client.active_sim_info def active_household(): client = client_manager().get_first_client() if", "services) def stop_global_services(): global _zone_manager, _account_service, _event_manager, _server_clock_service, _persistence_service, _terrain_service, _distributor_service, _intern_service, _object_leak_tracker", "None _event_manager = None _server_clock_service = None _persistence_service = None _terrain_service = None", "sims4.log.Logger('Services') time_delta = None gc_collection_enable = True class TimeStampService(sims4.service_manager.Service): def start(self): global gc_collection_enable,", "client = client_manager().get_first_client() if client is not None: return client.household def active_household_id(): client", "_zone except ImportError: class _zone: @staticmethod def invite_sims_to_zone(*_, **__): pass @staticmethod def get_house_description_id(*_,", "def get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service def holiday_service(): return getattr(game_services.service_manager, 'holiday_service', None) def 
global_policy_service(): return", "from sims4.tuning.tunable import Tunable, TunableReference import game_services import paths import sims4.reload import sims4.service_manager", "@staticmethod def is_event_enabled(*_, **__): pass invite_sims_to_zone = _zone.invite_sims_to_zone get_house_description_id = _zone.get_house_description_id is_event_enabled =", "None: if require_active_household and not travel_group.is_active_sim_in_travel_group: return else: return travel_group.object_preference_tracker def get_active_sim(): client", "tuning_managers = InstanceTuningManagers() get_instance_manager = tuning_managers.__getitem__ _account_service = None _zone_manager = None _server_clock_service", "client_manager().get_first_client() if client is not None: return client.active_sim_info def active_household(): client = client_manager().get_first_client()", "and not household.is_active_household: return return household.object_preference_tracker travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group is not", "def inventory_manager(zone_id=None): if zone_id is None: zone = current_zone() if zone is not", "**__): pass @staticmethod def is_event_enabled(*_, **__): pass invite_sims_to_zone = _zone.invite_sims_to_zone get_house_description_id = _zone.get_house_description_id", "**__): pass @staticmethod def get_world_and_lot_description_id_from_zone_id(*_, **__): pass @staticmethod def get_is_eco_footprint_compatible_for_world_description(*_, **__): return False", "= TerrainService() return _terrain_service def call_to_action_service(): return game_services.service_manager.call_to_action_service def trend_service(): return game_services.service_manager.trend_service def", "def get_persistence_service(): global _persistence_service if _persistence_service is None: from services.persistence_service import PersistenceService _persistence_service", "= time_stamp else: time_delta = time_stamp - time_delta production_logger.info('Time delta from 
loading start", "get_aging_service(): return game_services.service_manager.aging_service def get_cheat_service(): return game_services.service_manager.cheat_service def neighborhood_population_service(): return current_zone().neighborhood_population_service def get_reset_and_delete_service():", "INSTANCE_TUNING_DEFINITIONS: accessor_name = definition.manager_name accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name] = accessor production_logger =", "_server_clock_service def create_server_clock(initial_ticks): global _server_clock_service import clock _server_clock_service = clock.ServerClock(ticks=initial_ticks) def get_master_controller(): return", "get_first_client().selectable_sims def owning_household_id_of_active_lot(): zone = current_zone() if zone is not None: return zone.lot.owner_household_id", "_account_service, _zone_manager, _distributor_service, _intern_service create_server_clock(initial_ticks) from distributor.distributor_service import DistributorService from intern_service import InternService", "globals()[accessor_name] = accessor production_logger = sims4.log.ProductionLogger('Services') logger = sims4.log.Logger('Services') time_delta = None gc_collection_enable", "get_house_description_id = _zone.get_house_description_id is_event_enabled = _zone.is_event_enabled get_building_type = _zone.get_building_type get_eco_footprint_value = _zone.get_eco_footprint_value get_rent", "None def create_object_leak_tracker(start=False): global _object_leak_tracker from performance.object_leak_tracker import ObjectLeakTracker if _object_leak_tracker is None:", "travel_service(): return current_zone().travel_service def sim_quadtree(): return current_zone().sim_quadtree def single_part_condition_list(): return current_zone().single_part_condition_list def multi_part_condition_list():", "def get_gardening_service(): return current_zone().gardening_service def drama_scheduler_service(): return 
current_zone().drama_schedule_service def get_plex_service(): return current_zone().plex_service def", "AccountService from services.persistence_service import PersistenceService from services.terrain_service import TerrainService from sims4.tuning.serialization import FinalizeTuningService", "ensemble_service(): return current_zone().ensemble_service def sim_filter_service(zone_id=None): if zone_id is None: return current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service", "zone_spin_up_service(): return current_zone().zone_spin_up_service def household_manager(): return game_services.service_manager.household_manager def travel_group_manager(zone_id=None): if zone_id is None:", "current_zone().reset_and_delete_service def venue_service(): return current_zone().venue_service def venue_game_service(): return getattr(game_services.service_manager, 'venue_game_service', None) def zone_spin_up_service():", "is None: return current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service def get_photography_service(): return current_zone().photography_service def social_group_cluster_service(): return", "_intern_service, _object_leak_tracker _zone_manager.shutdown() _zone_manager = None tuning_managers.clear() _account_service = None _event_manager = None", "_server_clock_service = None _persistence_service = None _terrain_service = None _distributor_service = None _intern_service", "_object_leak_tracker def get_zone_manager(): return _zone_manager def current_zone(): if _zone_manager is not None: return", "get_first_client(): return client_manager().get_first_client() def get_selectable_sims(): return get_first_client().selectable_sims def owning_household_id_of_active_lot(): zone = current_zone() if", "zone = current_zone() if zone is not None and hasattr(zone, 'laundry_service'): return zone.laundry_service", "def server_clock_service(): if _server_clock_service is None: return return _server_clock_service def 
create_server_clock(initial_ticks): global _server_clock_service", "game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service(): return current_zone().ui_dialog_service def config_service(): return game_services.service_manager.config_service def travel_service(): return current_zone().travel_service", "sims4.core_services.start_services(init_critical_services, services) def stop_global_services(): global _zone_manager, _account_service, _event_manager, _server_clock_service, _persistence_service, _terrain_service, _distributor_service, _intern_service,", "functools import gc import time from services.tuning_managers import InstanceTuningManagers from sims4.resources import INSTANCE_TUNING_DEFINITIONS", "def current_zone_id(): if _zone_manager is not None: return sims4.zone_utils.zone_id def current_zone_info(): zone =", "current_zone().conditional_layer_service def get_sickness_service(): return game_services.service_manager.sickness_service def get_curfew_service(): return game_services.service_manager.curfew_service def get_locale(): client =", "home_zone is not None: lot = home_zone.lot if lot is not None: return", "logger.info('TimeStampService start at {}'.format(time_stamp)) if time_delta is None: time_delta = time_stamp else: time_delta", "return game_services.service_manager.rabbit_hole_service def holiday_service(): return getattr(game_services.service_manager, 'holiday_service', None) def global_policy_service(): return getattr(game_services.service_manager, 'global_policy_service',", "and not travel_group.is_active_sim_in_travel_group: return else: return travel_group.object_preference_tracker def get_active_sim(): client = client_manager().get_first_client() if", "def active_lot_id(): lot = active_lot() if lot is not None: return lot.lot_id def", "argparse import functools import gc import time from services.tuning_managers import InstanceTuningManagers from sims4.resources", "not None: return 
game_services.service_manager.client_object_managers return () def sim_info_manager(): return game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None): if", "_intern_service def get_zone_situation_manager(zone_id=None): if zone_id is None: return current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager def npc_hosted_situation_service():", "return game_services.service_manager.object_lost_and_found_service def street_service(): return getattr(game_services.service_manager, 'street_service', None) def c_api_gsi_dump(): import server_commands.developer_commands server_commands.developer_commands.gsi_dump()", "lot_decoration_service(): return getattr(game_services.service_manager, 'lot_decoration_service', None) def get_style_service(): return game_services.service_manager.style_service def get_tutorial_service(): return game_services.service_manager.tutorial_service", "return getattr(game_services.service_manager, 'club_service', None) def get_culling_service(): return current_zone().culling_service def get_gardening_service(): return current_zone().gardening_service def", "if household_id: return get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id): return", "zone_id is None: return current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager def npc_hosted_situation_service(): return current_zone().n_p_c_hosted_situation_service def ensemble_service():", "def sim_spawner_service(zone_id=None): if zone_id is None: return current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service def locator_manager(): return", "get_curfew_service(): return game_services.service_manager.curfew_service def get_locale(): 
client = get_first_client() return client.account.locale def relationship_service(): return", "= current_zone() else: zone = _zone_manager.get(zone_id) if zone is not None: return zone.object_manager", "not None: return zone.street def get_zone(zone_id, allow_uninstantiated_zones=False): if _zone_manager is not None: return", "not None: return client.active_sim_info def active_household(): client = client_manager().get_first_client() if client is not", "**__): pass @staticmethod def get_world_description_id(*_, **__): pass @staticmethod def get_world_id(*_, **__): pass @staticmethod", "def get_hide_from_lot_picker(*_, **__): pass @staticmethod def is_event_enabled(*_, **__): pass invite_sims_to_zone = _zone.invite_sims_to_zone get_house_description_id", "None: zone = current_zone() else: zone = _zone_manager.get(zone_id) if zone is not None:", "zone_id is None: zone = current_zone() if zone is not None: return zone.inventory_manager", "import ZoneManager parser = argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False, action='store_true') (args, unused_args) = parser.parse_known_args() if", "def get_master_controller(): return current_zone().master_controller def get_persistence_service(): global _persistence_service if _persistence_service is None: from", "return getattr(game_services.service_manager, 'holiday_service', None) def global_policy_service(): return getattr(game_services.service_manager, 'global_policy_service', None) def narrative_service(): return", "start_global_services(initial_ticks): global _account_service, _zone_manager, _distributor_service, _intern_service create_server_clock(initial_ticks) from distributor.distributor_service import DistributorService from intern_service", "return zone.region def current_street(): zone = current_zone() if zone is not None: return", "None _intern_service = None _terrain_service = None definition_manager = None snippet_manager = None", "@staticmethod def get_lot_description_id(*_, 
**__): pass @staticmethod def get_world_description_id(*_, **__): pass @staticmethod def get_world_id(*_,", "return return _server_clock_service def create_server_clock(initial_ticks): global _server_clock_service import clock _server_clock_service = clock.ServerClock(ticks=initial_ticks) def", "allow_uninstantiated_zones=False): if _zone_manager is not None: return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot(): zone =", "= None _terrain_service = None _distributor_service = None _intern_service = None if _object_leak_tracker", "client_manager().get_first_client() if client is not None: return client.household def active_household_id(): client = client_manager().get_first_client()", "_account_service = AccountService() _zone_manager = ZoneManager() _distributor_service = DistributorService() _intern_service = InternService() init_critical_services", "= PersistenceService() return _persistence_service def get_distributor_service(): return _distributor_service def get_fire_service(): return current_zone().fire_service def", "**__): return 0 @staticmethod def get_eco_footprint_value(*_, **__): return 0 @staticmethod def get_rent(*_, **__):", "start is {}'.format(time_delta)) logger.info('Time delta from loading start is {}'.format(time_delta)) return True def", "not None: _object_leak_tracker = None def create_object_leak_tracker(start=False): global _object_leak_tracker from performance.object_leak_tracker import ObjectLeakTracker", "get_first_client() return client.account.locale def relationship_service(): return game_services.service_manager.relationship_service def hidden_sim_service(): return game_services.service_manager.hidden_sim_service def weather_service():", "daycare_service(): zone = current_zone() if zone is not None: return zone.daycare_service def get_adoption_service():", "return lot.lot_id def client_object_managers(): if game_services.service_manager is not None: return 
game_services.service_manager.client_object_managers return ()", "return game_services.service_manager.cheat_service def neighborhood_population_service(): return current_zone().neighborhood_population_service def get_reset_and_delete_service(): return current_zone().reset_and_delete_service def venue_service(): return", "return current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service def locator_manager(): return current_zone().locator_manager def object_manager(zone_id=None): if zone_id is", "def get_style_service(): return game_services.service_manager.style_service def get_tutorial_service(): return game_services.service_manager.tutorial_service def calendar_service(): return current_zone().calendar_service def", "get_roommate_service(): return getattr(game_services.service_manager, 'roommate_service', None) def get_club_service(): return getattr(game_services.service_manager, 'club_service', None) def get_culling_service():", "is not None: return client.household def active_household_id(): client = client_manager().get_first_client() if client is", "return client.account.locale def relationship_service(): return game_services.service_manager.relationship_service def hidden_sim_service(): return game_services.service_manager.hidden_sim_service def weather_service(): return", "_distributor_service, _intern_service, _object_leak_tracker _zone_manager.shutdown() _zone_manager = None tuning_managers.clear() _account_service = None _event_manager =", "TunableReference import game_services import paths import sims4.reload import sims4.service_manager try: import _zone except", "client is not None: return client.household_id def active_household_lot_id(): household = active_household() if household", "zone is not None: return zone.street def get_zone(zone_id, allow_uninstantiated_zones=False): if _zone_manager is not", "sim_quadtree(): return current_zone().sim_quadtree def single_part_condition_list(): return 
current_zone().single_part_condition_list def multi_part_condition_list(): return current_zone().multi_part_condition_list def get_event_manager():", "require_active_household and not household.is_active_household: return return household.object_preference_tracker travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group is", "lot is not None: return lot.lot_id def privacy_service(): return current_zone().privacy_service def autonomy_service(): return", "client = client_manager().get_first_client() if client is not None: return client.active_sim_info def active_household(): client", "current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu(): pass def account_service(): return _account_service def business_service(): bs = game_services.service_manager.business_service", "current_zone().plex_service def get_door_service(): return current_zone().door_service def get_zone_modifier_service(): return current_zone().zone_modifier_service def get_demographics_service(): return current_zone().demographics_service", "create_server_clock(initial_ticks) from distributor.distributor_service import DistributorService from intern_service import InternService from server.account_service import AccountService", "zone is not None: return zone.daycare_service def get_adoption_service(): return current_zone().adoption_service def get_laundry_service(): zone", "client_manager().get_first_client() if client is not None: return client.active_sim def active_sim_info(): client = client_manager().get_first_client()", "autonomy_service(): return current_zone().autonomy_service def get_aging_service(): return game_services.service_manager.aging_service def get_cheat_service(): return game_services.service_manager.cheat_service def neighborhood_population_service():", "return zone.object_manager def inventory_manager(zone_id=None): if zone_id is None: zone = current_zone() if zone", "def drama_scheduler_service(): return 
current_zone().drama_schedule_service def get_plex_service(): return current_zone().plex_service def get_door_service(): return current_zone().door_service def", "return zone.lot def active_lot_id(): lot = active_lot() if lot is not None: return", "@staticmethod def get_is_eco_footprint_compatible_for_world_description(*_, **__): return False @staticmethod def get_hide_from_lot_picker(*_, **__): pass @staticmethod def", "@staticmethod def get_eco_footprint_value(*_, **__): return 0 @staticmethod def get_rent(*_, **__): return 0 @staticmethod", "@staticmethod def get_house_description_id(*_, **__): pass @staticmethod def get_building_type(*_, **__): return 0 @staticmethod def", "def active_household_id(): client = client_manager().get_first_client() if client is not None: return client.household_id def", "= current_zone() else: zone = _zone_manager.get(zone_id) if zone is not None: return zone.prop_manager", "gc.enable() production_logger.info('GC enabled') gc_collection_enable = True time_stamp = time.time() production_logger.info('TimeStampService start at {}'.format(time_stamp))", "get_service_npc_service(): return current_zone().service_npc_service def conditional_layer_service(): return current_zone().conditional_layer_service def get_sickness_service(): return game_services.service_manager.sickness_service def get_curfew_service():", "is not None: return sims4.zone_utils.zone_id def current_zone_info(): zone = current_zone() return zone.get_zone_info() def", "def get_landlord_service(): return getattr(game_services.service_manager, 'landlord_service', None) def get_roommate_service(): return getattr(game_services.service_manager, 'roommate_service', None) def", "not None: return client.active_sim def active_sim_info(): client = client_manager().get_first_client() if client is not", "current_zone().single_part_condition_list def multi_part_condition_list(): return current_zone().multi_part_condition_list def get_event_manager(): return 
game_services.service_manager.event_manager_service def get_current_venue(): service =", "invite_sims_to_zone = _zone.invite_sims_to_zone get_house_description_id = _zone.get_house_description_id is_event_enabled = _zone.is_event_enabled get_building_type = _zone.get_building_type get_eco_footprint_value", "time from services.tuning_managers import InstanceTuningManagers from sims4.resources import INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager import TuningInstanceManager", "getattr(game_services.service_manager, 'club_service', None) def get_culling_service(): return current_zone().culling_service def get_gardening_service(): return current_zone().gardening_service def drama_scheduler_service():", "current_zone().autonomy_service def get_aging_service(): return game_services.service_manager.aging_service def get_cheat_service(): return game_services.service_manager.cheat_service def neighborhood_population_service(): return current_zone().neighborhood_population_service", "if zone is not None and hasattr(zone, 'object_routing_service'): return zone.object_routing_service def get_landlord_service(): return", "travel_group.is_active_sim_in_travel_group: return else: return travel_group.object_preference_tracker def get_active_sim(): client = client_manager().get_first_client() if client is", "return _terrain_service def call_to_action_service(): return game_services.service_manager.call_to_action_service def trend_service(): return game_services.service_manager.trend_service def time_service(): return", "_zone_manager def current_zone(): if _zone_manager is not None: return _zone_manager.current_zone def current_zone_id(): if", "def get_active_sim(): client = client_manager().get_first_client() if client is not None: return client.active_sim def", "current_zone().career_service def get_story_progression_service(): return current_zone().story_progression_service def daycare_service(): zone = current_zone() if zone is", "is not None: return 
zone.region def current_street(): zone = current_zone() if zone is", "return _zone_manager def current_zone(): if _zone_manager is not None: return _zone_manager.current_zone def current_zone_id():", "= current_zone() if zone is not None and hasattr(zone, 'laundry_service'): return zone.laundry_service def", "ui_dialog_service(): return current_zone().ui_dialog_service def config_service(): return game_services.service_manager.config_service def travel_service(): return current_zone().travel_service def sim_quadtree():", "def get_door_service(): return current_zone().door_service def get_zone_modifier_service(): return current_zone().zone_modifier_service def get_demographics_service(): return current_zone().demographics_service def", "InternService from server.account_service import AccountService from services.persistence_service import PersistenceService from services.terrain_service import TerrainService", "= True time_stamp = time.time() production_logger.info('TimeStampService start at {}'.format(time_stamp)) logger.info('TimeStampService start at {}'.format(time_stamp))", "def global_policy_service(): return getattr(game_services.service_manager, 'global_policy_service', None) def narrative_service(): return getattr(game_services.service_manager, 'narrative_service', None) def", "inventory_manager(zone_id=None): if zone_id is None: zone = current_zone() if zone is not None:", "from services.persistence_service import PersistenceService from services.terrain_service import TerrainService from sims4.tuning.serialization import FinalizeTuningService from", "None) def get_culling_service(): return current_zone().culling_service def get_gardening_service(): return current_zone().gardening_service def drama_scheduler_service(): return current_zone().drama_schedule_service", "action='store_true') (args, unused_args) = parser.parse_known_args() if args.python_autoleak: create_object_leak_tracker() _account_service = AccountService() _zone_manager =", "import 
ObjectLeakTracker if _object_leak_tracker is None: _object_leak_tracker = ObjectLeakTracker() if start: _object_leak_tracker.start_tracking() return", "get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service(): return current_zone().ui_dialog_service", "not None and hasattr(zone, 'laundry_service'): return zone.laundry_service def get_object_routing_service(): zone = current_zone() if", "current_region(): zone = current_zone() if zone is not None: return zone.region def current_street():", "def get_tutorial_service(): return game_services.service_manager.tutorial_service def calendar_service(): return current_zone().calendar_service def get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service def", "'organization_service', None) def get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service def street_service(): return getattr(game_services.service_manager, 'street_service', None) def", "get_club_service(): return getattr(game_services.service_manager, 'club_service', None) def get_culling_service(): return current_zone().culling_service def get_gardening_service(): return current_zone().gardening_service", "game_services.service_manager.aging_service def get_cheat_service(): return game_services.service_manager.cheat_service def neighborhood_population_service(): return current_zone().neighborhood_population_service def get_reset_and_delete_service(): return current_zone().reset_and_delete_service", "active_sim_info(): client = client_manager().get_first_client() if client is not None: return client.active_sim_info def active_household():", "household = active_household() if household is not None: 
home_zone = get_zone(household.home_zone_id) if home_zone", "None _intern_service = None if _object_leak_tracker is not None: _object_leak_tracker = None def", "locator_manager(): return current_zone().locator_manager def object_manager(zone_id=None): if zone_id is None: zone = current_zone() else:", "_persistence_service def get_distributor_service(): return _distributor_service def get_fire_service(): return current_zone().fire_service def get_career_service(): return current_zone().career_service", "current_zone().multi_part_condition_list def get_event_manager(): return game_services.service_manager.event_manager_service def get_current_venue(): service = venue_service() if service is", "sims4.tuning.instance_manager import TuningInstanceManager from sims4.tuning.tunable import Tunable, TunableReference import game_services import paths import", "clock _server_clock_service = clock.ServerClock(ticks=initial_ticks) def get_master_controller(): return current_zone().master_controller def get_persistence_service(): global _persistence_service if", "def autonomy_service(): return current_zone().autonomy_service def get_aging_service(): return game_services.service_manager.aging_service def get_cheat_service(): return game_services.service_manager.cheat_service def", "get_tutorial_service(): return game_services.service_manager.tutorial_service def calendar_service(): return current_zone().calendar_service def get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service def holiday_service():", "return game_services.service_manager.curfew_service def get_locale(): client = get_first_client() return client.account.locale def relationship_service(): return game_services.service_manager.relationship_service", "_zone.get_eco_footprint_value get_rent = _zone.get_rent get_lot_description_id = _zone.get_lot_description_id get_world_description_id = _zone.get_world_description_id get_world_id = _zone.get_world_id", "gc_collection_enable = True class 
TimeStampService(sims4.service_manager.Service): def start(self): global gc_collection_enable, time_delta if gc_collection_enable: gc.disable()", "service is not None: return service.active_venue def get_intern_service(): return _intern_service def get_zone_situation_manager(zone_id=None): if", "get_rent = _zone.get_rent get_lot_description_id = _zone.get_lot_description_id get_world_description_id = _zone.get_world_description_id get_world_id = _zone.get_world_id get_world_and_lot_description_id_from_zone_id", "def get_culling_service(): return current_zone().culling_service def get_gardening_service(): return current_zone().gardening_service def drama_scheduler_service(): return current_zone().drama_schedule_service def", "is None: return current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager def npc_hosted_situation_service(): return current_zone().n_p_c_hosted_situation_service def ensemble_service(): return", "= None _persistence_service = None _terrain_service = None _distributor_service = None _intern_service =", "def get_terrain_service(): global _terrain_service if _terrain_service is None: from services.terrain_service import TerrainService _terrain_service", "= functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name] = accessor production_logger = sims4.log.ProductionLogger('Services') logger = sims4.log.Logger('Services') time_delta", "= time.time() production_logger.info('TimeStampService start at {}'.format(time_stamp)) logger.info('TimeStampService start at {}'.format(time_stamp)) if time_delta is", "current_zone().privacy_service def autonomy_service(): return current_zone().autonomy_service def get_aging_service(): return game_services.service_manager.aging_service def get_cheat_service(): return game_services.service_manager.cheat_service", "class TimeStampService(sims4.service_manager.Service): def start(self): global gc_collection_enable, time_delta if gc_collection_enable: 
gc.disable() production_logger.info('GC disabled') gc_collection_enable", "not None: return service.active_venue def get_intern_service(): return _intern_service def get_zone_situation_manager(zone_id=None): if zone_id is", "import sims4.service_manager try: import _zone except ImportError: class _zone: @staticmethod def invite_sims_to_zone(*_, **__):", "**__): pass @staticmethod def get_world_id(*_, **__): pass @staticmethod def get_world_and_lot_description_id_from_zone_id(*_, **__): pass @staticmethod", "time_stamp else: time_delta = time_stamp - time_delta production_logger.info('Time delta from loading start is", "= True class TimeStampService(sims4.service_manager.Service): def start(self): global gc_collection_enable, time_delta if gc_collection_enable: gc.disable() production_logger.info('GC", "_zone_manager = None _server_clock_service = None _persistence_service = None _distributor_service = None _intern_service", "privacy_service(): return current_zone().privacy_service def autonomy_service(): return current_zone().autonomy_service def get_aging_service(): return game_services.service_manager.aging_service def get_cheat_service():", "pass def account_service(): return _account_service def business_service(): bs = game_services.service_manager.business_service return bs def", "None: from services.persistence_service import PersistenceService _persistence_service = PersistenceService() return _persistence_service def get_distributor_service(): return", "allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot(): zone = current_zone() if zone is not None: return zone.lot", "None: household = household_manager().get(zone.lot.owner_household_id) if household is not None: if require_active_household and not", "household_manager(): return game_services.service_manager.household_manager def travel_group_manager(zone_id=None): if zone_id is None: zone = current_zone() if", "sims4.tuning.tunable import Tunable, TunableReference import game_services 
import paths import sims4.reload import sims4.service_manager try:", "service = venue_service() if service is not None: return service.active_venue def get_intern_service(): return", "zone.daycare_service def get_adoption_service(): return current_zone().adoption_service def get_laundry_service(): zone = current_zone() if zone is", "import clock _server_clock_service = clock.ServerClock(ticks=initial_ticks) def get_master_controller(): return current_zone().master_controller def get_persistence_service(): global _persistence_service", "sim_filter_service(zone_id=None): if zone_id is None: return current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service def get_photography_service(): return current_zone().photography_service", "zone is not None: return zone.prop_manager def social_group_manager(): return current_zone().social_group_manager def client_manager(): return", "zone is not None: return zone.region def current_street(): zone = current_zone() if zone", "if _persistence_service is None: from services.persistence_service import PersistenceService _persistence_service = PersistenceService() return _persistence_service", "return current_zone().culling_service def get_gardening_service(): return current_zone().gardening_service def drama_scheduler_service(): return current_zone().drama_schedule_service def get_plex_service(): return", "if _server_clock_service is None: return return _server_clock_service def create_server_clock(initial_ticks): global _server_clock_service import clock", "= None gc_collection_enable = True class TimeStampService(sims4.service_manager.Service): def start(self): global gc_collection_enable, time_delta if", "None: return service.active_venue def get_intern_service(): return _intern_service def get_zone_situation_manager(zone_id=None): if zone_id is None:", "return current_zone().career_service def get_story_progression_service(): return current_zone().story_progression_service def daycare_service(): zone 
= current_zone() if zone", "bs = game_services.service_manager.business_service return bs def get_terrain_service(): global _terrain_service if _terrain_service is None:", "None) def lot_decoration_service(): return getattr(game_services.service_manager, 'lot_decoration_service', None) def get_style_service(): return game_services.service_manager.style_service def get_tutorial_service():", "time_service(): return game_services.service_manager.time_service def game_clock_service(): return game_services.service_manager.game_clock def server_clock_service(): if _server_clock_service is None:", "if zone is not None: return zone.travel_group_manager return return _zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None): if", "None: _object_leak_tracker = ObjectLeakTracker() if start: _object_leak_tracker.start_tracking() return True return False def get_object_leak_tracker():", "is not None: lot = home_zone.lot if lot is not None: return lot.lot_id", "def get_first_client(): return client_manager().get_first_client() def get_selectable_sims(): return get_first_client().selectable_sims def owning_household_id_of_active_lot(): zone = current_zone()", "return game_services.service_manager.hidden_sim_service def weather_service(): return getattr(game_services.service_manager, 'weather_service', None) def season_service(): return getattr(game_services.service_manager, 'season_service',", "get_reset_and_delete_service(): return current_zone().reset_and_delete_service def venue_service(): return current_zone().venue_service def venue_game_service(): return getattr(game_services.service_manager, 'venue_game_service', None)", "def current_street(): zone = current_zone() if zone is not None: return zone.street def", "if game_services.service_manager is not None: return game_services.service_manager.client_object_managers return () def sim_info_manager(): return game_services.service_manager.sim_info_manager", "current_zone().ensemble_service def 
sim_filter_service(zone_id=None): if zone_id is None: return current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service def get_photography_service():", "= InternService() init_critical_services = [server_clock_service(), get_persistence_service()] services = [_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService] instantiated_tuning_managers", "= household_manager().get(zone.lot.owner_household_id) if household is not None: if require_active_household and not household.is_active_household: return", "TimeStampService(sims4.service_manager.Service): def start(self): global gc_collection_enable, time_delta if gc_collection_enable: gc.disable() production_logger.info('GC disabled') gc_collection_enable =", "current_zone_info(): zone = current_zone() return zone.get_zone_info() def current_region(): zone = current_zone() if zone", "parser = argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False, action='store_true') (args, unused_args) = parser.parse_known_args() if args.python_autoleak: create_object_leak_tracker()", "def holiday_service(): return getattr(game_services.service_manager, 'holiday_service', None) def global_policy_service(): return getattr(game_services.service_manager, 'global_policy_service', None) def", "active_household_id(): client = client_manager().get_first_client() if client is not None: return client.household_id def active_household_lot_id():", "if client is not None: return client.household_id def active_household_lot_id(): household = active_household() if", "pass @staticmethod def get_world_id(*_, **__): pass @staticmethod def get_world_and_lot_description_id_from_zone_id(*_, **__): pass @staticmethod def", "None: return sims4.zone_utils.zone_id def current_zone_info(): zone = current_zone() return zone.get_zone_info() def current_region(): zone", "if _zone_manager is not None: return sims4.zone_utils.zone_id def current_zone_info(): zone = 
current_zone() return", "return _zone_manager.get(zone_id).sim_spawner_service def locator_manager(): return current_zone().locator_manager def object_manager(zone_id=None): if zone_id is None: zone", "parser.add_argument('--python_autoleak', default=False, action='store_true') (args, unused_args) = parser.parse_known_args() if args.python_autoleak: create_object_leak_tracker() _account_service = AccountService()", "return _object_leak_tracker def get_zone_manager(): return _zone_manager def current_zone(): if _zone_manager is not None:", "get_instance_manager = tuning_managers.__getitem__ _account_service = None _zone_manager = None _server_clock_service = None _persistence_service", "None _persistence_service = None _terrain_service = None _distributor_service = None _intern_service = None", "def social_group_cluster_service(): return current_zone().social_group_cluster_service def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if", "if zone_id is None: zone = current_zone() else: zone = _zone_manager.get(zone_id) if zone", "zone = current_zone() if zone is not None: household = household_manager().get(zone.lot.owner_household_id) if household", "if lot is not None: return lot.lot_id def client_object_managers(): if game_services.service_manager is not", "household.object_preference_tracker travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group is not None: if require_active_household and not", "return zone.get_zone_info() def current_region(): zone = current_zone() if zone is not None: return", "= time_stamp - time_delta production_logger.info('Time delta from loading start is {}'.format(time_delta)) logger.info('Time delta", "None: home_zone = get_zone(household.home_zone_id) 
if home_zone is not None: lot = home_zone.lot if", "= None _server_clock_service = None _persistence_service = None _distributor_service = None _intern_service =", "game_services.service_manager.hidden_sim_service def weather_service(): return getattr(game_services.service_manager, 'weather_service', None) def season_service(): return getattr(game_services.service_manager, 'season_service', None)", "if start: _object_leak_tracker.start_tracking() return True return False def get_object_leak_tracker(): return _object_leak_tracker def get_zone_manager():", "start at {}'.format(time_stamp)) logger.info('TimeStampService start at {}'.format(time_stamp)) if time_delta is None: time_delta =", "= _zone.get_eco_footprint_value get_rent = _zone.get_rent get_lot_description_id = _zone.get_lot_description_id get_world_description_id = _zone.get_world_description_id get_world_id =", "not None: return zone.posture_graph_service return return _zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None): if zone_id is None:", "_account_service = None _event_manager = None _server_clock_service = None _persistence_service = None _terrain_service", "return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service(): return current_zone().ui_dialog_service def config_service(): return game_services.service_manager.config_service def travel_service(): return", "_intern_service = None if _object_leak_tracker is not None: _object_leak_tracker = None def create_object_leak_tracker(start=False):", "zone.object_manager def inventory_manager(zone_id=None): if zone_id is None: zone = current_zone() if zone is", "return _zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None): if household_id: return get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id): return", "current_zone() if 
zone is not None: return zone.street def get_zone(zone_id, allow_uninstantiated_zones=False): if _zone_manager", "if zone is not None: return zone.street def get_zone(zone_id, allow_uninstantiated_zones=False): if _zone_manager is", "current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu(): pass def account_service():", "get_landlord_service(): return getattr(game_services.service_manager, 'landlord_service', None) def get_roommate_service(): return getattr(game_services.service_manager, 'roommate_service', None) def get_club_service():", "_persistence_service, _terrain_service, _distributor_service, _intern_service, _object_leak_tracker _zone_manager.shutdown() _zone_manager = None tuning_managers.clear() _account_service = None", "_zone: @staticmethod def invite_sims_to_zone(*_, **__): pass @staticmethod def get_house_description_id(*_, **__): pass @staticmethod def", "return current_zone().gardening_service def drama_scheduler_service(): return current_zone().drama_schedule_service def get_plex_service(): return current_zone().plex_service def get_door_service(): return", "current_zone().story_progression_service def daycare_service(): zone = current_zone() if zone is not None: return zone.daycare_service", "is None: time_delta = time_stamp else: time_delta = time_stamp - time_delta production_logger.info('Time delta", "production_logger.info('GC disabled') gc_collection_enable = False else: gc.enable() production_logger.info('GC enabled') gc_collection_enable = True time_stamp", "not None: return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot(): zone = current_zone() if zone is", "from server.account_service import 
AccountService from services.persistence_service import PersistenceService from services.terrain_service import TerrainService from", "return current_zone().travel_service def sim_quadtree(): return current_zone().sim_quadtree def single_part_condition_list(): return current_zone().single_part_condition_list def multi_part_condition_list(): return", "TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service]) sims4.core_services.start_services(init_critical_services, services) def stop_global_services(): global _zone_manager, _account_service, _event_manager,", "[_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService] instantiated_tuning_managers = [] for definition in INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers))", "= current_zone() if zone is not None: return zone.inventory_manager return return _zone_manager.get(zone_id).inventory_manager def", "def venue_game_service(): return getattr(game_services.service_manager, 'venue_game_service', None) def zone_spin_up_service(): return current_zone().zone_spin_up_service def household_manager(): return", "import gc import time from services.tuning_managers import InstanceTuningManagers from sims4.resources import INSTANCE_TUNING_DEFINITIONS from", "household = household_manager().get(zone.lot.owner_household_id) if household is not None: if require_active_household and not household.is_active_household:", "for definition in INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service]) 
sims4.core_services.start_services(init_critical_services,", "not None: return lot.lot_id def privacy_service(): return current_zone().privacy_service def autonomy_service(): return current_zone().autonomy_service def", "pass @staticmethod def get_house_description_id(*_, **__): pass @staticmethod def get_building_type(*_, **__): return 0 @staticmethod", "time_delta = time_stamp - time_delta production_logger.info('Time delta from loading start is {}'.format(time_delta)) logger.info('Time", "_account_service = None _zone_manager = None _server_clock_service = None _persistence_service = None _distributor_service", "production_logger = sims4.log.ProductionLogger('Services') logger = sims4.log.Logger('Services') time_delta = None gc_collection_enable = True class", "ObjectLeakTracker if _object_leak_tracker is None: _object_leak_tracker = ObjectLeakTracker() if start: _object_leak_tracker.start_tracking() return True", "get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker = _zone.get_hide_from_lot_picker with sims4.reload.protected(globals()): tuning_managers =", "import INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager import TuningInstanceManager from sims4.tuning.tunable import Tunable, TunableReference import game_services", "def get_current_venue(): service = venue_service() if service is not None: return service.active_venue def", "TerrainService() return _terrain_service def call_to_action_service(): return game_services.service_manager.call_to_action_service def trend_service(): return game_services.service_manager.trend_service def time_service():", "None _distributor_service = None _intern_service = None if _object_leak_tracker is not None: _object_leak_tracker", "return getattr(game_services.service_manager, 'narrative_service', None) def 
organization_service(): return getattr(game_services.service_manager, 'organization_service', None) def get_object_lost_and_found_service(): return", "game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu(): pass def account_service(): return _account_service def business_service(): bs", "is_event_enabled = _zone.is_event_enabled get_building_type = _zone.get_building_type get_eco_footprint_value = _zone.get_eco_footprint_value get_rent = _zone.get_rent get_lot_description_id", "def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def", "None) def get_roommate_service(): return getattr(game_services.service_manager, 'roommate_service', None) def get_club_service(): return getattr(game_services.service_manager, 'club_service', None)", "_zone_manager = ZoneManager() _distributor_service = DistributorService() _intern_service = InternService() init_critical_services = [server_clock_service(), get_persistence_service()]", "def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu(): pass def account_service(): return", "def prop_manager(zone_id=None): if zone_id is None: zone = current_zone() else: zone = _zone_manager.get(zone_id)", "_object_leak_tracker is not None: _object_leak_tracker = None 
def create_object_leak_tracker(start=False): global _object_leak_tracker from performance.object_leak_tracker", "gc_collection_enable = True time_stamp = time.time() production_logger.info('TimeStampService start at {}'.format(time_stamp)) logger.info('TimeStampService start at", "def object_preference_tracker(require_active_household=False): zone = current_zone() if zone is not None: household = household_manager().get(zone.lot.owner_household_id)", "'club_service', None) def get_culling_service(): return current_zone().culling_service def get_gardening_service(): return current_zone().gardening_service def drama_scheduler_service(): return", "get_intern_service(): return _intern_service def get_zone_situation_manager(zone_id=None): if zone_id is None: return current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager", "zone is not None: return zone.lot.owner_household_id def owning_household_of_active_lot(): zone = current_zone() if zone", "game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu(): pass def", "travel_group is not None: if require_active_household and not travel_group.is_active_sim_in_travel_group: return else: return travel_group.object_preference_tracker", "**__): pass invite_sims_to_zone = _zone.invite_sims_to_zone get_house_description_id = _zone.get_house_description_id is_event_enabled = _zone.is_event_enabled get_building_type =", "zone.street def get_zone(zone_id, allow_uninstantiated_zones=False): if _zone_manager is not None: return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def", "def get_house_description_id(*_, **__): pass 
@staticmethod def get_building_type(*_, **__): return 0 @staticmethod def get_eco_footprint_value(*_,", "call_to_action_service(): return game_services.service_manager.call_to_action_service def trend_service(): return game_services.service_manager.trend_service def time_service(): return game_services.service_manager.time_service def game_clock_service():", "not None: if require_active_household and not household.is_active_household: return return household.object_preference_tracker travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id)", "@staticmethod def get_hide_from_lot_picker(*_, **__): pass @staticmethod def is_event_enabled(*_, **__): pass invite_sims_to_zone = _zone.invite_sims_to_zone", "None for definition in INSTANCE_TUNING_DEFINITIONS: accessor_name = definition.manager_name accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name]", "get_eco_footprint_value = _zone.get_eco_footprint_value get_rent = _zone.get_rent get_lot_description_id = _zone.get_lot_description_id get_world_description_id = _zone.get_world_description_id get_world_id", "return current_zone().photography_service def social_group_cluster_service(): return current_zone().social_group_cluster_service def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client):", "current_zone() if zone is not None: return zone.travel_group_manager return return _zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None):", "return current_zone().privacy_service def autonomy_service(): return current_zone().autonomy_service def get_aging_service(): return game_services.service_manager.aging_service def get_cheat_service(): return", "current_zone().drama_schedule_service def get_plex_service(): return 
current_zone().plex_service def get_door_service(): return current_zone().door_service def get_zone_modifier_service(): return current_zone().zone_modifier_service", "get_terrain_service(), _zone_manager, _account_service]) sims4.core_services.start_services(init_critical_services, services) def stop_global_services(): global _zone_manager, _account_service, _event_manager, _server_clock_service, _persistence_service,", "def start_global_services(initial_ticks): global _account_service, _zone_manager, _distributor_service, _intern_service create_server_clock(initial_ticks) from distributor.distributor_service import DistributorService from", "game_services.service_manager.client_manager def get_first_client(): return client_manager().get_first_client() def get_selectable_sims(): return get_first_client().selectable_sims def owning_household_id_of_active_lot(): zone =", "_terrain_service def call_to_action_service(): return game_services.service_manager.call_to_action_service def trend_service(): return game_services.service_manager.trend_service def time_service(): return game_services.service_manager.time_service", "def get_curfew_service(): return game_services.service_manager.curfew_service def get_locale(): client = get_first_client() return client.account.locale def relationship_service():", "return current_zone().neighborhood_population_service def get_reset_and_delete_service(): return current_zone().reset_and_delete_service def venue_service(): return current_zone().venue_service def venue_game_service(): return", "None: return lot.lot_id def privacy_service(): return current_zone().privacy_service def autonomy_service(): return current_zone().autonomy_service def get_aging_service():", "_distributor_service def get_fire_service(): return current_zone().fire_service def get_career_service(): return current_zone().career_service def get_story_progression_service(): return current_zone().story_progression_service", "zone = _zone_manager.get(zone_id) if zone 
is not None: return zone.object_manager def inventory_manager(zone_id=None): if", "default=False, action='store_true') (args, unused_args) = parser.parse_known_args() if args.python_autoleak: create_object_leak_tracker() _account_service = AccountService() _zone_manager", "return zone.street def get_zone(zone_id, allow_uninstantiated_zones=False): if _zone_manager is not None: return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones)", "get_world_description_id = _zone.get_world_description_id get_world_id = _zone.get_world_id get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker", "None: zone = current_zone() if zone is not None: return zone.posture_graph_service return return", "_intern_service = InternService() init_critical_services = [server_clock_service(), get_persistence_service()] services = [_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService]", "current_zone().n_p_c_hosted_situation_service def ensemble_service(): return current_zone().ensemble_service def sim_filter_service(zone_id=None): if zone_id is None: return current_zone().sim_filter_service", "if require_active_household and not household.is_active_household: return return household.object_preference_tracker travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group", "if zone_id is None: zone = current_zone() if zone is not None: return", "return current_zone().drama_schedule_service def get_plex_service(): return current_zone().plex_service def get_door_service(): return current_zone().door_service def get_zone_modifier_service(): return", "return current_zone().social_group_cluster_service def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) 
game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client)", "tuning_managers.clear() _account_service = None _event_manager = None _server_clock_service = None _persistence_service = None", "if zone is not None: return zone.inventory_manager return return _zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None): if", "get_hide_from_lot_picker(*_, **__): pass @staticmethod def is_event_enabled(*_, **__): pass invite_sims_to_zone = _zone.invite_sims_to_zone get_house_description_id =", "current_zone() if zone is not None: return household_manager().get(zone.lot.owner_household_id) def object_preference_tracker(require_active_household=False): zone = current_zone()", "client = client_manager().get_first_client() if client is not None: return client.active_sim def active_sim_info(): client", "not None: return _zone_manager.current_zone def current_zone_id(): if _zone_manager is not None: return sims4.zone_utils.zone_id", "zone.posture_graph_service return return _zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None): if zone_id is None: return current_zone().sim_spawner_service return", "try: import _zone except ImportError: class _zone: @staticmethod def invite_sims_to_zone(*_, **__): pass @staticmethod", "= sims4.log.Logger('Services') time_delta = None gc_collection_enable = True class TimeStampService(sims4.service_manager.Service): def start(self): global", "from loading start is {}'.format(time_delta)) return True def start_global_services(initial_ticks): global _account_service, _zone_manager, _distributor_service,", "None definition_manager = None snippet_manager = None _terrain_object = None _object_leak_tracker = None", "return 
current_zone().conditional_layer_service def get_sickness_service(): return game_services.service_manager.sickness_service def get_curfew_service(): return game_services.service_manager.curfew_service def get_locale(): client", "def sim_filter_service(zone_id=None): if zone_id is None: return current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service def get_photography_service(): return", "from services.persistence_service import PersistenceService _persistence_service = PersistenceService() return _persistence_service def get_distributor_service(): return _distributor_service", "if zone is not None: household = household_manager().get(zone.lot.owner_household_id) if household is not None:", "ImportError: class _zone: @staticmethod def invite_sims_to_zone(*_, **__): pass @staticmethod def get_house_description_id(*_, **__): pass", "= AccountService() _zone_manager = ZoneManager() _distributor_service = DistributorService() _intern_service = InternService() init_critical_services =", "import InternService from server.account_service import AccountService from services.persistence_service import PersistenceService from services.terrain_service import", "object_preference_tracker(require_active_household=False): zone = current_zone() if zone is not None: household = household_manager().get(zone.lot.owner_household_id) if", "def organization_service(): return getattr(game_services.service_manager, 'organization_service', None) def get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service def street_service(): return", "def weather_service(): return getattr(game_services.service_manager, 'weather_service', None) def season_service(): return getattr(game_services.service_manager, 'season_service', None) def", "_zone_manager, _distributor_service, _intern_service create_server_clock(initial_ticks) from distributor.distributor_service import DistributorService from intern_service import 
InternService from", "from performance.object_leak_tracker import ObjectLeakTracker if _object_leak_tracker is None: _object_leak_tracker = ObjectLeakTracker() if start:", "return current_zone().calendar_service def get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service def holiday_service(): return getattr(game_services.service_manager, 'holiday_service', None) def", "household is not None: if require_active_household and not household.is_active_household: return return household.object_preference_tracker travel_group", "current_zone().calendar_service def get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service def holiday_service(): return getattr(game_services.service_manager, 'holiday_service', None) def global_policy_service():", "services.terrain_service import TerrainService from sims4.tuning.serialization import FinalizeTuningService from zone_manager import ZoneManager parser =", "travel_group_manager(zone_id=None): if zone_id is None: zone = current_zone() if zone is not None:", "if gc_collection_enable: gc.disable() production_logger.info('GC disabled') gc_collection_enable = False else: gc.enable() production_logger.info('GC enabled') gc_collection_enable", "get_object_leak_tracker(): return _object_leak_tracker def get_zone_manager(): return _zone_manager def current_zone(): if _zone_manager is not", "getattr(game_services.service_manager, 'roommate_service', None) def get_club_service(): return getattr(game_services.service_manager, 'club_service', None) def get_culling_service(): return current_zone().culling_service", "= _zone_manager.get(zone_id) if zone is not None: return zone.prop_manager def social_group_manager(): return current_zone().social_group_manager", "def get_world_and_lot_description_id_from_zone_id(*_, **__): pass @staticmethod def get_is_eco_footprint_compatible_for_world_description(*_, **__): return False @staticmethod def get_hide_from_lot_picker(*_,", 
"_server_clock_service = clock.ServerClock(ticks=initial_ticks) def get_master_controller(): return current_zone().master_controller def get_persistence_service(): global _persistence_service if _persistence_service", "def get_locale(): client = get_first_client() return client.account.locale def relationship_service(): return game_services.service_manager.relationship_service def hidden_sim_service():", "init_critical_services = [server_clock_service(), get_persistence_service()] services = [_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService] instantiated_tuning_managers = []", "None: return zone.posture_graph_service return return _zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None): if zone_id is None: return", "return current_zone().locator_manager def object_manager(zone_id=None): if zone_id is None: zone = current_zone() else: zone", "_terrain_service if _terrain_service is None: from services.terrain_service import TerrainService _terrain_service = TerrainService() return", "pass @staticmethod def is_event_enabled(*_, **__): pass invite_sims_to_zone = _zone.invite_sims_to_zone get_house_description_id = _zone.get_house_description_id is_event_enabled", "_zone_manager.get(zone_id).situation_manager def npc_hosted_situation_service(): return current_zone().n_p_c_hosted_situation_service def ensemble_service(): return current_zone().ensemble_service def sim_filter_service(zone_id=None): if zone_id", "current_zone() if zone is not None: household = household_manager().get(zone.lot.owner_household_id) if household is not", "_zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker = _zone.get_hide_from_lot_picker with sims4.reload.protected(globals()): tuning_managers = InstanceTuningManagers() get_instance_manager", "time_delta = time_stamp else: 
time_delta = time_stamp - time_delta production_logger.info('Time delta from loading", "is None: from services.persistence_service import PersistenceService _persistence_service = PersistenceService() return _persistence_service def get_distributor_service():", "client is not None: return client.active_sim def active_sim_info(): client = client_manager().get_first_client() if client", "if zone is not None: return zone.lot.owner_household_id def owning_household_of_active_lot(): zone = current_zone() if", "client is not None: return client.household def active_household_id(): client = client_manager().get_first_client() if client", "import time from services.tuning_managers import InstanceTuningManagers from sims4.resources import INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager import", "= active_lot() if lot is not None: return lot.lot_id def client_object_managers(): if game_services.service_manager", "= current_zone() if zone is not None: return zone.posture_graph_service return return _zone_manager.get(zone_id).posture_graph_service def", "'landlord_service', None) def get_roommate_service(): return getattr(game_services.service_manager, 'roommate_service', None) def get_club_service(): return getattr(game_services.service_manager, 'club_service',", "definition in INSTANCE_TUNING_DEFINITIONS: accessor_name = definition.manager_name accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name] = accessor", "_intern_service.get_start_interning(), TimeStampService] instantiated_tuning_managers = [] for definition in INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService,", "def is_event_enabled(*_, **__): pass invite_sims_to_zone = _zone.invite_sims_to_zone get_house_description_id = _zone.get_house_description_id 
is_event_enabled = _zone.is_event_enabled", "None: return zone.inventory_manager return return _zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None): if zone_id is None: zone", "zone.inventory_manager return return _zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None): if zone_id is None: zone = current_zone()", "{}'.format(time_stamp)) logger.info('TimeStampService start at {}'.format(time_stamp)) if time_delta is None: time_delta = time_stamp else:", "**__): pass @staticmethod def get_house_description_id(*_, **__): pass @staticmethod def get_building_type(*_, **__): return 0", "def get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service def street_service(): return getattr(game_services.service_manager, 'street_service', None) def c_api_gsi_dump(): import", "current_zone().ui_dialog_service def config_service(): return game_services.service_manager.config_service def travel_service(): return current_zone().travel_service def sim_quadtree(): return current_zone().sim_quadtree", "get_demographics_service(): return current_zone().demographics_service def get_service_npc_service(): return current_zone().service_npc_service def conditional_layer_service(): return current_zone().conditional_layer_service def get_sickness_service():", "get_culling_service(): return current_zone().culling_service def get_gardening_service(): return current_zone().gardening_service def drama_scheduler_service(): return current_zone().drama_schedule_service def get_plex_service():", "= client_manager().get_first_client() if client is not None: return client.active_sim def active_sim_info(): client =", "zone.laundry_service def get_object_routing_service(): zone = current_zone() if zone is not None and hasattr(zone,", "= [server_clock_service(), get_persistence_service()] services = [_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService] 
instantiated_tuning_managers = [] for", "definition in INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service]) sims4.core_services.start_services(init_critical_services, services)", "gc_collection_enable, time_delta if gc_collection_enable: gc.disable() production_logger.info('GC disabled') gc_collection_enable = False else: gc.enable() production_logger.info('GC", "return () def sim_info_manager(): return game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None): if zone_id is None: zone", "client = client_manager().get_first_client() if client is not None: return client.household_id def active_household_lot_id(): household", "not None: return zone.travel_group_manager return return _zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None): if household_id: return get_utilities_manager_by_household_id(household_id)", "= ObjectLeakTracker() if start: _object_leak_tracker.start_tracking() return True return False def get_object_leak_tracker(): return _object_leak_tracker", "@staticmethod def get_world_and_lot_description_id_from_zone_id(*_, **__): pass @staticmethod def get_is_eco_footprint_compatible_for_world_description(*_, **__): return False @staticmethod def", "paths import sims4.reload import sims4.service_manager try: import _zone except ImportError: class _zone: @staticmethod", "_intern_service, _intern_service.get_start_interning(), TimeStampService] instantiated_tuning_managers = [] for definition in INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService,", "**__): 
pass @staticmethod def get_is_eco_footprint_compatible_for_world_description(*_, **__): return False @staticmethod def get_hide_from_lot_picker(*_, **__): pass", "and hasattr(zone, 'laundry_service'): return zone.laundry_service def get_object_routing_service(): zone = current_zone() if zone is", "return get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def", "get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service(): return current_zone().ui_dialog_service def config_service():", "not None: return zone.lot.owner_household_id def owning_household_of_active_lot(): zone = current_zone() if zone is not", "get_world_description_id(*_, **__): pass @staticmethod def get_world_id(*_, **__): pass @staticmethod def get_world_and_lot_description_id_from_zone_id(*_, **__): pass", "if zone_id is None: return current_zone().sim_filter_service return _zone_manager.get(zone_id).sim_filter_service def get_photography_service(): return current_zone().photography_service def", "return game_services.service_manager.client_object_managers return () def sim_info_manager(): return game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None): if zone_id is", "is not None: return zone.lot.owner_household_id def owning_household_of_active_lot(): zone = current_zone() if zone is", "None) def narrative_service(): return getattr(game_services.service_manager, 'narrative_service', None) 
def organization_service(): return getattr(game_services.service_manager, 'organization_service', None)", "return _zone_manager.current_zone def current_zone_id(): if _zone_manager is not None: return sims4.zone_utils.zone_id def current_zone_info():", "game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu(): pass def account_service(): return _account_service def business_service(): bs =", "game_services.service_manager.household_manager def travel_group_manager(zone_id=None): if zone_id is None: zone = current_zone() if zone is", "return _zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None): if zone_id is None: zone = current_zone() else: zone", "def get_object_routing_service(): zone = current_zone() if zone is not None and hasattr(zone, 'object_routing_service'):", "None) def season_service(): return getattr(game_services.service_manager, 'season_service', None) def lot_decoration_service(): return getattr(game_services.service_manager, 'lot_decoration_service', None)", "return getattr(game_services.service_manager, 'venue_game_service', None) def zone_spin_up_service(): return current_zone().zone_spin_up_service def household_manager(): return game_services.service_manager.household_manager def", "global _account_service, _zone_manager, _distributor_service, _intern_service create_server_clock(initial_ticks) from distributor.distributor_service import DistributorService from intern_service import", "0 @staticmethod def get_eco_footprint_value(*_, **__): return 0 @staticmethod def get_rent(*_, **__): return 0", "return zone.laundry_service def get_object_routing_service(): zone = current_zone() if zone is not None and", "not travel_group.is_active_sim_in_travel_group: return else: return travel_group.object_preference_tracker def get_active_sim(): client = client_manager().get_first_client() if client", "def get_zone_modifier_service(): return 
current_zone().zone_modifier_service def get_demographics_service(): return current_zone().demographics_service def get_service_npc_service(): return current_zone().service_npc_service def", "None _server_clock_service = None _persistence_service = None _distributor_service = None _intern_service = None", "None: return client.active_sim_info def active_household(): client = client_manager().get_first_client() if client is not None:", "venue_game_service(): return getattr(game_services.service_manager, 'venue_game_service', None) def zone_spin_up_service(): return current_zone().zone_spin_up_service def household_manager(): return game_services.service_manager.household_manager", "get_object_routing_service(): zone = current_zone() if zone is not None and hasattr(zone, 'object_routing_service'): return", "return game_services.service_manager.relationship_service def hidden_sim_service(): return game_services.service_manager.hidden_sim_service def weather_service(): return getattr(game_services.service_manager, 'weather_service', None) def", "= accessor production_logger = sims4.log.ProductionLogger('Services') logger = sims4.log.Logger('Services') time_delta = None gc_collection_enable =", "lot = home_zone.lot if lot is not None: return lot.lot_id def privacy_service(): return", "clock.ServerClock(ticks=initial_ticks) def get_master_controller(): return current_zone().master_controller def get_persistence_service(): global _persistence_service if _persistence_service is None:", "else: return travel_group.object_preference_tracker def get_active_sim(): client = client_manager().get_first_client() if client is not None:", "**__): return 0 @staticmethod def get_lot_description_id(*_, **__): pass @staticmethod def get_world_description_id(*_, **__): pass", "None: return zone.object_manager def inventory_manager(zone_id=None): if zone_id is None: zone = current_zone() if", "_account_service, _event_manager, _server_clock_service, _persistence_service, _terrain_service, 
_distributor_service, _intern_service, _object_leak_tracker _zone_manager.shutdown() _zone_manager = None tuning_managers.clear()", "TerrainService from sims4.tuning.serialization import FinalizeTuningService from zone_manager import ZoneManager parser = argparse.ArgumentParser() parser.add_argument('--python_autoleak',", "_object_leak_tracker = None for definition in INSTANCE_TUNING_DEFINITIONS: accessor_name = definition.manager_name accessor = functools.partial(tuning_managers.__getitem__,", "PersistenceService _persistence_service = PersistenceService() return _persistence_service def get_distributor_service(): return _distributor_service def get_fire_service(): return", "tuning_managers.__getitem__ _account_service = None _zone_manager = None _server_clock_service = None _persistence_service = None", "if args.python_autoleak: create_object_leak_tracker() _account_service = AccountService() _zone_manager = ZoneManager() _distributor_service = DistributorService() _intern_service", "config_service(): return game_services.service_manager.config_service def travel_service(): return current_zone().travel_service def sim_quadtree(): return current_zone().sim_quadtree def single_part_condition_list():", "return _distributor_service def get_fire_service(): return current_zone().fire_service def get_career_service(): return current_zone().career_service def get_story_progression_service(): return", "social_group_manager(): return current_zone().social_group_manager def client_manager(): return game_services.service_manager.client_manager def get_first_client(): return client_manager().get_first_client() def get_selectable_sims():", "get_style_service(): return game_services.service_manager.style_service def get_tutorial_service(): return game_services.service_manager.tutorial_service def calendar_service(): return current_zone().calendar_service def get_rabbit_hole_service():", "get_cheat_service(): return game_services.service_manager.cheat_service def 
neighborhood_population_service(): return current_zone().neighborhood_population_service def get_reset_and_delete_service(): return current_zone().reset_and_delete_service def venue_service():", "= current_zone() if zone is not None: household = household_manager().get(zone.lot.owner_household_id) if household is", "if time_delta is None: time_delta = time_stamp else: time_delta = time_stamp - time_delta", "zone_id is None: zone = current_zone() if zone is not None: return zone.posture_graph_service", "is not None: return zone.lot def active_lot_id(): lot = active_lot() if lot is", "sims4.service_manager try: import _zone except ImportError: class _zone: @staticmethod def invite_sims_to_zone(*_, **__): pass", "def invite_sims_to_zone(*_, **__): pass @staticmethod def get_house_description_id(*_, **__): pass @staticmethod def get_building_type(*_, **__):", "get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker = _zone.get_hide_from_lot_picker with sims4.reload.protected(globals()): tuning_managers = InstanceTuningManagers() get_instance_manager =", "time.time() production_logger.info('TimeStampService start at {}'.format(time_stamp)) logger.info('TimeStampService start at {}'.format(time_stamp)) if time_delta is None:", "= None _object_leak_tracker = None for definition in INSTANCE_TUNING_DEFINITIONS: accessor_name = definition.manager_name accessor", "None and hasattr(zone, 'object_routing_service'): return zone.object_routing_service def get_landlord_service(): return getattr(game_services.service_manager, 'landlord_service', None) def", "False else: gc.enable() production_logger.info('GC enabled') gc_collection_enable = True time_stamp = time.time() production_logger.info('TimeStampService start", "not None: return zone.lot def active_lot_id(): lot = active_lot() if lot is not", "return household.object_preference_tracker travel_group = 
travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group is not None: if require_active_household and", "False @staticmethod def get_hide_from_lot_picker(*_, **__): pass @staticmethod def is_event_enabled(*_, **__): pass invite_sims_to_zone =", "_persistence_service = None _terrain_service = None _distributor_service = None _intern_service = None if", "game_services.service_manager.tutorial_service def calendar_service(): return current_zone().calendar_service def get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service def holiday_service(): return getattr(game_services.service_manager,", "definition.manager_name accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name] = accessor production_logger = sims4.log.ProductionLogger('Services') logger =", "client_manager().get_first_client() def get_selectable_sims(): return get_first_client().selectable_sims def owning_household_id_of_active_lot(): zone = current_zone() if zone is", "start is {}'.format(time_delta)) return True def start_global_services(initial_ticks): global _account_service, _zone_manager, _distributor_service, _intern_service create_server_clock(initial_ticks)", "and hasattr(zone, 'object_routing_service'): return zone.object_routing_service def get_landlord_service(): return getattr(game_services.service_manager, 'landlord_service', None) def get_roommate_service():", "return False @staticmethod def get_hide_from_lot_picker(*_, **__): pass @staticmethod def is_event_enabled(*_, **__): pass invite_sims_to_zone", "is not None: return household_manager().get(zone.lot.owner_household_id) def object_preference_tracker(require_active_household=False): zone = current_zone() if zone is", "def get_intern_service(): return _intern_service def get_zone_situation_manager(zone_id=None): if zone_id is None: return current_zone().situation_manager return", "def narrative_service(): return 
getattr(game_services.service_manager, 'narrative_service', None) def organization_service(): return getattr(game_services.service_manager, 'organization_service', None) def", "travel_group.object_preference_tracker def get_active_sim(): client = client_manager().get_first_client() if client is not None: return client.active_sim", "import paths import sims4.reload import sims4.service_manager try: import _zone except ImportError: class _zone:", "current_zone().locator_manager def object_manager(zone_id=None): if zone_id is None: zone = current_zone() else: zone =", "from zone_manager import ZoneManager parser = argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False, action='store_true') (args, unused_args) =", "_zone.invite_sims_to_zone get_house_description_id = _zone.get_house_description_id is_event_enabled = _zone.is_event_enabled get_building_type = _zone.get_building_type get_eco_footprint_value = _zone.get_eco_footprint_value", "return return _zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None): if zone_id is None: return current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service", "import PersistenceService _persistence_service = PersistenceService() return _persistence_service def get_distributor_service(): return _distributor_service def get_fire_service():", "get_active_sim(): client = client_manager().get_first_client() if client is not None: return client.active_sim def active_sim_info():", "getattr(game_services.service_manager, 'narrative_service', None) def organization_service(): return getattr(game_services.service_manager, 'organization_service', None) def get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service", "return zone.travel_group_manager return return _zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None): if household_id: return 
get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id())", "def config_service(): return game_services.service_manager.config_service def travel_service(): return current_zone().travel_service def sim_quadtree(): return current_zone().sim_quadtree def", "conditional_layer_service(): return current_zone().conditional_layer_service def get_sickness_service(): return game_services.service_manager.sickness_service def get_curfew_service(): return game_services.service_manager.curfew_service def get_locale():", "def posture_graph_service(zone_id=None): if zone_id is None: zone = current_zone() if zone is not", "from sims4.resources import INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager import TuningInstanceManager from sims4.tuning.tunable import Tunable, TunableReference", "pass @staticmethod def get_world_and_lot_description_id_from_zone_id(*_, **__): pass @staticmethod def get_is_eco_footprint_compatible_for_world_description(*_, **__): return False @staticmethod", "unused_args) = parser.parse_known_args() if args.python_autoleak: create_object_leak_tracker() _account_service = AccountService() _zone_manager = ZoneManager() _distributor_service", "is None: from services.terrain_service import TerrainService _terrain_service = TerrainService() return _terrain_service def call_to_action_service():", "from services.terrain_service import TerrainService _terrain_service = TerrainService() return _terrain_service def call_to_action_service(): return game_services.service_manager.call_to_action_service", "zone.travel_group_manager return return _zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None): if household_id: return get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id()) def", "ZoneManager parser = argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False, action='store_true') (args, 
unused_args) = parser.parse_known_args() if args.python_autoleak:", "def get_sickness_service(): return game_services.service_manager.sickness_service def get_curfew_service(): return game_services.service_manager.curfew_service def get_locale(): client = get_first_client()", "return get_first_client().selectable_sims def owning_household_id_of_active_lot(): zone = current_zone() if zone is not None: return", "def sim_quadtree(): return current_zone().sim_quadtree def single_part_condition_list(): return current_zone().single_part_condition_list def multi_part_condition_list(): return current_zone().multi_part_condition_list def", "def get_adoption_service(): return current_zone().adoption_service def get_laundry_service(): zone = current_zone() if zone is not", "game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None): if zone_id is None: zone = current_zone() if zone is", "(args, unused_args) = parser.parse_known_args() if args.python_autoleak: create_object_leak_tracker() _account_service = AccountService() _zone_manager = ZoneManager()", "def household_manager(): return game_services.service_manager.household_manager def travel_group_manager(zone_id=None): if zone_id is None: zone = current_zone()", "def season_service(): return getattr(game_services.service_manager, 'season_service', None) def lot_decoration_service(): return getattr(game_services.service_manager, 'lot_decoration_service', None) def", "season_service(): return getattr(game_services.service_manager, 'season_service', None) def lot_decoration_service(): return getattr(game_services.service_manager, 'lot_decoration_service', None) def get_style_service():", "= definition.manager_name accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name] = accessor production_logger = sims4.log.ProductionLogger('Services') logger", "zone = current_zone() if zone is not None: return zone.daycare_service def 
get_adoption_service(): return", "services = [_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService] instantiated_tuning_managers = [] for definition in INSTANCE_TUNING_DEFINITIONS:", "zone is not None: return zone.posture_graph_service return return _zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None): if zone_id", "return current_zone().multi_part_condition_list def get_event_manager(): return game_services.service_manager.event_manager_service def get_current_venue(): service = venue_service() if service", "_zone_manager.get(zone_id).sim_spawner_service def locator_manager(): return current_zone().locator_manager def object_manager(zone_id=None): if zone_id is None: zone =", "None: return zone.lot.owner_household_id def owning_household_of_active_lot(): zone = current_zone() if zone is not None:", "= None for definition in INSTANCE_TUNING_DEFINITIONS: accessor_name = definition.manager_name accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE)", "def lot_decoration_service(): return getattr(game_services.service_manager, 'lot_decoration_service', None) def get_style_service(): return game_services.service_manager.style_service def get_tutorial_service(): return", "zone is not None: return zone.lot def active_lot_id(): lot = active_lot() if lot", "game_services.service_manager.call_to_action_service def trend_service(): return game_services.service_manager.trend_service def time_service(): return game_services.service_manager.time_service def game_clock_service(): return game_services.service_manager.game_clock", "not None: return zone.object_manager def inventory_manager(zone_id=None): if zone_id is None: zone = current_zone()", "import functools import gc import time from services.tuning_managers import InstanceTuningManagers from sims4.resources import", "return client.household_id def active_household_lot_id(): household = active_household() if household 
is not None: home_zone", "zone = current_zone() return zone.get_zone_info() def current_region(): zone = current_zone() if zone is", "None: return _zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot(): zone = current_zone() if zone is not", "def get_eco_footprint_value(*_, **__): return 0 @staticmethod def get_rent(*_, **__): return 0 @staticmethod def", "pass @staticmethod def get_is_eco_footprint_compatible_for_world_description(*_, **__): return False @staticmethod def get_hide_from_lot_picker(*_, **__): pass @staticmethod", "in INSTANCE_TUNING_DEFINITIONS: instantiated_tuning_managers.append(tuning_managers[definition.TYPE_ENUM_VALUE]) services.append(TuningInstanceManager(instantiated_tuning_managers)) services.extend([FinalizeTuningService, TimeStampService, _intern_service.get_stop_interning(), get_terrain_service(), _zone_manager, _account_service]) sims4.core_services.start_services(init_critical_services, services) def", "return current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager def npc_hosted_situation_service(): return current_zone().n_p_c_hosted_situation_service def ensemble_service(): return current_zone().ensemble_service def", "get_plex_service(): return current_zone().plex_service def get_door_service(): return current_zone().door_service def get_zone_modifier_service(): return current_zone().zone_modifier_service def get_demographics_service():", "return client.active_sim def active_sim_info(): client = client_manager().get_first_client() if client is not None: return", "_zone.get_lot_description_id get_world_description_id = _zone.get_world_description_id get_world_id = _zone.get_world_id get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description", "get_building_type = _zone.get_building_type 
get_eco_footprint_value = _zone.get_eco_footprint_value get_rent = _zone.get_rent get_lot_description_id = _zone.get_lot_description_id get_world_description_id", "None) def get_style_service(): return game_services.service_manager.style_service def get_tutorial_service(): return game_services.service_manager.tutorial_service def calendar_service(): return current_zone().calendar_service", "None _object_leak_tracker = None for definition in INSTANCE_TUNING_DEFINITIONS: accessor_name = definition.manager_name accessor =", "_account_service def business_service(): bs = game_services.service_manager.business_service return bs def get_terrain_service(): global _terrain_service if", "get_distributor_service(): return _distributor_service def get_fire_service(): return current_zone().fire_service def get_career_service(): return current_zone().career_service def get_story_progression_service():", "None: return client.household def active_household_id(): client = client_manager().get_first_client() if client is not None:", "current_zone(): if _zone_manager is not None: return _zone_manager.current_zone def current_zone_id(): if _zone_manager is", "True time_stamp = time.time() production_logger.info('TimeStampService start at {}'.format(time_stamp)) logger.info('TimeStampService start at {}'.format(time_stamp)) if", "service.active_venue def get_intern_service(): return _intern_service def get_zone_situation_manager(zone_id=None): if zone_id is None: return current_zone().situation_manager", "calendar_service(): return current_zone().calendar_service def get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service def holiday_service(): return getattr(game_services.service_manager, 'holiday_service', None)", "current_zone().culling_service def get_gardening_service(): return current_zone().gardening_service def drama_scheduler_service(): return current_zone().drama_schedule_service def get_plex_service(): return current_zone().plex_service", "None) 
def get_object_lost_and_found_service(): return game_services.service_manager.object_lost_and_found_service def street_service(): return getattr(game_services.service_manager, 'street_service', None) def c_api_gsi_dump():", "return False def get_object_leak_tracker(): return _object_leak_tracker def get_zone_manager(): return _zone_manager def current_zone(): if", "is None: _object_leak_tracker = ObjectLeakTracker() if start: _object_leak_tracker.start_tracking() return True return False def", "not None and hasattr(zone, 'object_routing_service'): return zone.object_routing_service def get_landlord_service(): return getattr(game_services.service_manager, 'landlord_service', None)", "_zone_manager is not None: return _zone_manager.current_zone def current_zone_id(): if _zone_manager is not None:", "gc_collection_enable: gc.disable() production_logger.info('GC disabled') gc_collection_enable = False else: gc.enable() production_logger.info('GC enabled') gc_collection_enable =", "_object_leak_tracker from performance.object_leak_tracker import ObjectLeakTracker if _object_leak_tracker is None: _object_leak_tracker = ObjectLeakTracker() if", "= get_first_client() return client.account.locale def relationship_service(): return game_services.service_manager.relationship_service def hidden_sim_service(): return game_services.service_manager.hidden_sim_service def", "= None def create_object_leak_tracker(start=False): global _object_leak_tracker from performance.object_leak_tracker import ObjectLeakTracker if _object_leak_tracker is", "not None: return zone.region def current_street(): zone = current_zone() if zone is not", "on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu(): pass def account_service(): return _account_service", "**__): 
return False @staticmethod def get_hide_from_lot_picker(*_, **__): pass @staticmethod def is_event_enabled(*_, **__): pass", "current_zone().gardening_service def drama_scheduler_service(): return current_zone().drama_schedule_service def get_plex_service(): return current_zone().plex_service def get_door_service(): return current_zone().door_service", "- time_delta production_logger.info('Time delta from loading start is {}'.format(time_delta)) logger.info('Time delta from loading", "getattr(game_services.service_manager, 'weather_service', None) def season_service(): return getattr(game_services.service_manager, 'season_service', None) def lot_decoration_service(): return getattr(game_services.service_manager,", "_zone_manager.get(zone_id, allow_uninstantiated_zones=allow_uninstantiated_zones) def active_lot(): zone = current_zone() if zone is not None: return", "zone_manager import ZoneManager parser = argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False, action='store_true') (args, unused_args) = parser.parse_known_args()", "ZoneManager() _distributor_service = DistributorService() _intern_service = InternService() init_critical_services = [server_clock_service(), get_persistence_service()] services =", "None: _object_leak_tracker = None def create_object_leak_tracker(start=False): global _object_leak_tracker from performance.object_leak_tracker import ObjectLeakTracker if", "utilities_manager(household_id=None): if household_id: return get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id):", "else: gc.enable() production_logger.info('GC enabled') gc_collection_enable = True time_stamp = time.time() production_logger.info('TimeStampService start at", "if home_zone is not None: lot = 
home_zone.lot if lot is not None:", "client.account.locale def relationship_service(): return game_services.service_manager.relationship_service def hidden_sim_service(): return game_services.service_manager.hidden_sim_service def weather_service(): return getattr(game_services.service_manager,", "is not None: return service.active_venue def get_intern_service(): return _intern_service def get_zone_situation_manager(zone_id=None): if zone_id", "return current_zone().zone_modifier_service def get_demographics_service(): return current_zone().demographics_service def get_service_npc_service(): return current_zone().service_npc_service def conditional_layer_service(): return", "npc_hosted_situation_service(): return current_zone().n_p_c_hosted_situation_service def ensemble_service(): return current_zone().ensemble_service def sim_filter_service(zone_id=None): if zone_id is None:", "return 0 @staticmethod def get_lot_description_id(*_, **__): pass @staticmethod def get_world_description_id(*_, **__): pass @staticmethod", "not None: return lot.lot_id def client_object_managers(): if game_services.service_manager is not None: return game_services.service_manager.client_object_managers", "return return _zone_manager.get(zone_id).travel_group_manager def utilities_manager(household_id=None): if household_id: return get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id):", "def trend_service(): return game_services.service_manager.trend_service def time_service(): return game_services.service_manager.time_service def game_clock_service(): return game_services.service_manager.game_clock def", "get_persistence_service()] services = [_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService] instantiated_tuning_managers = [] for definition in", "zone = current_zone() else: zone = _zone_manager.get(zone_id) if zone is not None: 
return", "= _zone.get_building_type get_eco_footprint_value = _zone.get_eco_footprint_value get_rent = _zone.get_rent get_lot_description_id = _zone.get_lot_description_id get_world_description_id =", "def venue_service(): return current_zone().venue_service def venue_game_service(): return getattr(game_services.service_manager, 'venue_game_service', None) def zone_spin_up_service(): return", "game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service(): return current_zone().ui_dialog_service def config_service(): return game_services.service_manager.config_service", "def relationship_service(): return game_services.service_manager.relationship_service def hidden_sim_service(): return game_services.service_manager.hidden_sim_service def weather_service(): return getattr(game_services.service_manager, 'weather_service',", "if zone is not None: return zone.region def current_street(): zone = current_zone() if", "on_enter_main_menu(): pass def account_service(): return _account_service def business_service(): bs = game_services.service_manager.business_service return bs", "current_zone() else: zone = _zone_manager.get(zone_id) if zone is not None: return zone.object_manager def", "travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group is not None: if require_active_household and not travel_group.is_active_sim_in_travel_group:", "return else: return travel_group.object_preference_tracker def get_active_sim(): client = client_manager().get_first_client() if client is not", "game_services.service_manager.relationship_service def hidden_sim_service(): return game_services.service_manager.hidden_sim_service def weather_service(): return getattr(game_services.service_manager, 'weather_service', None) def season_service():", "get_house_description_id(*_, 
**__): pass @staticmethod def get_building_type(*_, **__): return 0 @staticmethod def get_eco_footprint_value(*_, **__):", "None: return return _server_clock_service def create_server_clock(initial_ticks): global _server_clock_service import clock _server_clock_service = clock.ServerClock(ticks=initial_ticks)", "def sim_info_manager(): return game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None): if zone_id is None: zone = current_zone()", "not None: return household_manager().get(zone.lot.owner_household_id) def object_preference_tracker(require_active_household=False): zone = current_zone() if zone is not", "{}'.format(time_stamp)) if time_delta is None: time_delta = time_stamp else: time_delta = time_stamp -", "with sims4.reload.protected(globals()): tuning_managers = InstanceTuningManagers() get_instance_manager = tuning_managers.__getitem__ _account_service = None _zone_manager =", "is None: return return _server_clock_service def create_server_clock(initial_ticks): global _server_clock_service import clock _server_clock_service =", "class _zone: @staticmethod def invite_sims_to_zone(*_, **__): pass @staticmethod def get_house_description_id(*_, **__): pass @staticmethod", "def get_world_description_id(*_, **__): pass @staticmethod def get_world_id(*_, **__): pass @staticmethod def get_world_and_lot_description_id_from_zone_id(*_, **__):", "zone = current_zone() if zone is not None: return zone.inventory_manager return return _zone_manager.get(zone_id).inventory_manager", "sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown: game_services.service_manager.on_client_disconnect(client) current_zone().service_manager.on_client_disconnect(client) def on_enter_main_menu(): pass def account_service(): return _account_service def", "def get_roommate_service(): return getattr(game_services.service_manager, 'roommate_service', None) def get_club_service(): return 
getattr(game_services.service_manager, 'club_service', None) def", "= None _terrain_service = None definition_manager = None snippet_manager = None _terrain_object =", "return getattr(game_services.service_manager, 'lot_decoration_service', None) def get_style_service(): return game_services.service_manager.style_service def get_tutorial_service(): return game_services.service_manager.tutorial_service def", "from distributor.distributor_service import DistributorService from intern_service import InternService from server.account_service import AccountService from", "_event_manager, _server_clock_service, _persistence_service, _terrain_service, _distributor_service, _intern_service, _object_leak_tracker _zone_manager.shutdown() _zone_manager = None tuning_managers.clear() _account_service", "global_policy_service(): return getattr(game_services.service_manager, 'global_policy_service', None) def narrative_service(): return getattr(game_services.service_manager, 'narrative_service', None) def organization_service():", "hidden_sim_service(): return game_services.service_manager.hidden_sim_service def weather_service(): return getattr(game_services.service_manager, 'weather_service', None) def season_service(): return getattr(game_services.service_manager,", "get_zone(household.home_zone_id) if home_zone is not None: lot = home_zone.lot if lot is not", "def daycare_service(): zone = current_zone() if zone is not None: return zone.daycare_service def", "is not None: _object_leak_tracker = None def create_object_leak_tracker(start=False): global _object_leak_tracker from performance.object_leak_tracker import", "if service is not None: return service.active_venue def get_intern_service(): return _intern_service def get_zone_situation_manager(zone_id=None):", "= _zone.get_rent get_lot_description_id = _zone.get_lot_description_id get_world_description_id = _zone.get_world_description_id get_world_id = _zone.get_world_id get_world_and_lot_description_id_from_zone_id =", 
"time_stamp = time.time() production_logger.info('TimeStampService start at {}'.format(time_stamp)) logger.info('TimeStampService start at {}'.format(time_stamp)) if time_delta", "def call_to_action_service(): return game_services.service_manager.call_to_action_service def trend_service(): return game_services.service_manager.trend_service def time_service(): return game_services.service_manager.time_service def", "get_world_id(*_, **__): pass @staticmethod def get_world_and_lot_description_id_from_zone_id(*_, **__): pass @staticmethod def get_is_eco_footprint_compatible_for_world_description(*_, **__): return", "= current_zone() return zone.get_zone_info() def current_region(): zone = current_zone() if zone is not", "_zone_manager is not None: return sims4.zone_utils.zone_id def current_zone_info(): zone = current_zone() return zone.get_zone_info()", "return _persistence_service def get_distributor_service(): return _distributor_service def get_fire_service(): return current_zone().fire_service def get_career_service(): return", "= argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False, action='store_true') (args, unused_args) = parser.parse_known_args() if args.python_autoleak: create_object_leak_tracker() _account_service", "global _persistence_service if _persistence_service is None: from services.persistence_service import PersistenceService _persistence_service = PersistenceService()", "= _zone.get_world_description_id get_world_id = _zone.get_world_id get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker =", "None: return lot.lot_id def client_object_managers(): if game_services.service_manager is not None: return game_services.service_manager.client_object_managers return", "get_fire_service(): return current_zone().fire_service def 
get_career_service(): return current_zone().career_service def get_story_progression_service(): return current_zone().story_progression_service def daycare_service():", "def get_is_eco_footprint_compatible_for_world_description(*_, **__): return False @staticmethod def get_hide_from_lot_picker(*_, **__): pass @staticmethod def is_event_enabled(*_,", "PersistenceService from services.terrain_service import TerrainService from sims4.tuning.serialization import FinalizeTuningService from zone_manager import ZoneManager", "in INSTANCE_TUNING_DEFINITIONS: accessor_name = definition.manager_name accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name] = accessor production_logger", "= current_zone() if zone is not None: return zone.street def get_zone(zone_id, allow_uninstantiated_zones=False): if", "global gc_collection_enable, time_delta if gc_collection_enable: gc.disable() production_logger.info('GC disabled') gc_collection_enable = False else: gc.enable()", "_terrain_service = TerrainService() return _terrain_service def call_to_action_service(): return game_services.service_manager.call_to_action_service def trend_service(): return game_services.service_manager.trend_service", "None snippet_manager = None _terrain_object = None _object_leak_tracker = None for definition in", "_terrain_object = None _object_leak_tracker = None for definition in INSTANCE_TUNING_DEFINITIONS: accessor_name = definition.manager_name", "is None: zone = current_zone() if zone is not None: return zone.posture_graph_service return", "_zone.get_house_description_id is_event_enabled = _zone.is_event_enabled get_building_type = _zone.get_building_type get_eco_footprint_value = _zone.get_eco_footprint_value get_rent = _zone.get_rent", "= _zone_manager.get(zone_id) if zone is not None: return zone.object_manager def inventory_manager(zone_id=None): if zone_id", "None: return household_manager().get(zone.lot.owner_household_id) def 
object_preference_tracker(require_active_household=False): zone = current_zone() if zone is not None:", "current_zone() else: zone = _zone_manager.get(zone_id) if zone is not None: return zone.prop_manager def", "owning_household_id_of_active_lot(): zone = current_zone() if zone is not None: return zone.lot.owner_household_id def owning_household_of_active_lot():", "zone.lot.owner_household_id def owning_household_of_active_lot(): zone = current_zone() if zone is not None: return household_manager().get(zone.lot.owner_household_id)", "def get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service(): return current_zone().ui_dialog_service def", "client_object_managers(): if game_services.service_manager is not None: return game_services.service_manager.client_object_managers return () def sim_info_manager(): return", "active_lot() if lot is not None: return lot.lot_id def client_object_managers(): if game_services.service_manager is", "zone = current_zone() if zone is not None: return zone.region def current_street(): zone", "get_photography_service(): return current_zone().photography_service def social_group_cluster_service(): return current_zone().social_group_cluster_service def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client) def", "'roommate_service', None) def get_club_service(): return getattr(game_services.service_manager, 'club_service', None) def get_culling_service(): return current_zone().culling_service def", "import TerrainService from sims4.tuning.serialization import FinalizeTuningService from zone_manager import ZoneManager parser = 
argparse.ArgumentParser()", "return current_zone().sim_quadtree def single_part_condition_list(): return current_zone().single_part_condition_list def multi_part_condition_list(): return current_zone().multi_part_condition_list def get_event_manager(): return", "sims4.resources import INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager import TuningInstanceManager from sims4.tuning.tunable import Tunable, TunableReference import", "getattr(game_services.service_manager, 'season_service', None) def lot_decoration_service(): return getattr(game_services.service_manager, 'lot_decoration_service', None) def get_style_service(): return game_services.service_manager.style_service", "return game_services.service_manager.call_to_action_service def trend_service(): return game_services.service_manager.trend_service def time_service(): return game_services.service_manager.time_service def game_clock_service(): return", "get_adoption_service(): return current_zone().adoption_service def get_laundry_service(): zone = current_zone() if zone is not None", "is_event_enabled(*_, **__): pass invite_sims_to_zone = _zone.invite_sims_to_zone get_house_description_id = _zone.get_house_description_id is_event_enabled = _zone.is_event_enabled get_building_type", "return current_zone().demographics_service def get_service_npc_service(): return current_zone().service_npc_service def conditional_layer_service(): return current_zone().conditional_layer_service def get_sickness_service(): return", "**__): pass @staticmethod def get_building_type(*_, **__): return 0 @staticmethod def get_eco_footprint_value(*_, **__): return", "return game_services.service_manager.config_service def travel_service(): return current_zone().travel_service def sim_quadtree(): return current_zone().sim_quadtree def single_part_condition_list(): return", "owning_household_of_active_lot(): zone = current_zone() if zone is not None: return household_manager().get(zone.lot.owner_household_id) def 
object_preference_tracker(require_active_household=False):", "return game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None): if zone_id is None: zone = current_zone() if zone", "return current_zone().adoption_service def get_laundry_service(): zone = current_zone() if zone is not None and", "current_zone().social_group_manager def client_manager(): return game_services.service_manager.client_manager def get_first_client(): return client_manager().get_first_client() def get_selectable_sims(): return get_first_client().selectable_sims", "narrative_service(): return getattr(game_services.service_manager, 'narrative_service', None) def organization_service(): return getattr(game_services.service_manager, 'organization_service', None) def get_object_lost_and_found_service():", "return game_services.service_manager.sickness_service def get_curfew_service(): return game_services.service_manager.curfew_service def get_locale(): client = get_first_client() return client.account.locale", "import InstanceTuningManagers from sims4.resources import INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager import TuningInstanceManager from sims4.tuning.tunable import", "= None tuning_managers.clear() _account_service = None _event_manager = None _server_clock_service = None _persistence_service", "None gc_collection_enable = True class TimeStampService(sims4.service_manager.Service): def start(self): global gc_collection_enable, time_delta if gc_collection_enable:", "venue_service() if service is not None: return service.active_venue def get_intern_service(): return _intern_service def", "sim_info_manager(): return game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None): if zone_id is None: zone = current_zone() if", "is not None: return lot.lot_id def client_object_managers(): if game_services.service_manager is not None: return", "return _intern_service def get_zone_situation_manager(zone_id=None): if zone_id 
is None: return current_zone().situation_manager return _zone_manager.get(zone_id).situation_manager def", "sims4.tuning.serialization import FinalizeTuningService from zone_manager import ZoneManager parser = argparse.ArgumentParser() parser.add_argument('--python_autoleak', default=False, action='store_true')", "def active_lot(): zone = current_zone() if zone is not None: return zone.lot def", "services.tuning_managers import InstanceTuningManagers from sims4.resources import INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager import TuningInstanceManager from sims4.tuning.tunable", "else: zone = _zone_manager.get(zone_id) if zone is not None: return zone.object_manager def inventory_manager(zone_id=None):", "sims4.zone_utils.zone_id def current_zone_info(): zone = current_zone() return zone.get_zone_info() def current_region(): zone = current_zone()", "single_part_condition_list(): return current_zone().single_part_condition_list def multi_part_condition_list(): return current_zone().multi_part_condition_list def get_event_manager(): return game_services.service_manager.event_manager_service def get_current_venue():", "def get_zone_manager(): return _zone_manager def current_zone(): if _zone_manager is not None: return _zone_manager.current_zone", "if household is not None: if require_active_household and not household.is_active_household: return return household.object_preference_tracker", "get_persistence_service(): global _persistence_service if _persistence_service is None: from services.persistence_service import PersistenceService _persistence_service =", "distributor.distributor_service import DistributorService from intern_service import InternService from server.account_service import AccountService from services.persistence_service", "zone = current_zone() if zone is not None: return zone.street def get_zone(zone_id, allow_uninstantiated_zones=False):", "game_services.service_manager.client_object_managers return () def sim_info_manager(): 
return game_services.service_manager.sim_info_manager def posture_graph_service(zone_id=None): if zone_id is None:", "active_lot(): zone = current_zone() if zone is not None: return zone.lot def active_lot_id():", "None tuning_managers.clear() _account_service = None _event_manager = None _server_clock_service = None _persistence_service =", "import AccountService from services.persistence_service import PersistenceService from services.terrain_service import TerrainService from sims4.tuning.serialization import", "game_services.service_manager.curfew_service def get_locale(): client = get_first_client() return client.account.locale def relationship_service(): return game_services.service_manager.relationship_service def", "is not None: return client.household_id def active_household_lot_id(): household = active_household() if household is", "InternService() init_critical_services = [server_clock_service(), get_persistence_service()] services = [_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService] instantiated_tuning_managers =", "for definition in INSTANCE_TUNING_DEFINITIONS: accessor_name = definition.manager_name accessor = functools.partial(tuning_managers.__getitem__, definition.TYPE_ENUM_VALUE) globals()[accessor_name] =", "game_services.service_manager.trend_service def time_service(): return game_services.service_manager.time_service def game_clock_service(): return game_services.service_manager.game_clock def server_clock_service(): if _server_clock_service", "= None _distributor_service = None _intern_service = None _terrain_service = None definition_manager =", "return getattr(game_services.service_manager, 'weather_service', None) def season_service(): return getattr(game_services.service_manager, 'season_service', None) def lot_decoration_service(): return", "else: time_delta = time_stamp - time_delta production_logger.info('Time delta from loading start is {}'.format(time_delta))", 
"sim_spawner_service(zone_id=None): if zone_id is None: return current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service def locator_manager(): return current_zone().locator_manager", "get_zone_manager(): return _zone_manager def current_zone(): if _zone_manager is not None: return _zone_manager.current_zone def", "TuningInstanceManager from sims4.tuning.tunable import Tunable, TunableReference import game_services import paths import sims4.reload import", "_zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None): if zone_id is None: return current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service def locator_manager():", "not None: return zone.prop_manager def social_group_manager(): return current_zone().social_group_manager def client_manager(): return game_services.service_manager.client_manager def", "return current_zone().door_service def get_zone_modifier_service(): return current_zone().zone_modifier_service def get_demographics_service(): return current_zone().demographics_service def get_service_npc_service(): return", "global _server_clock_service import clock _server_clock_service = clock.ServerClock(ticks=initial_ticks) def get_master_controller(): return current_zone().master_controller def get_persistence_service():", "is not None: return lot.lot_id def privacy_service(): return current_zone().privacy_service def autonomy_service(): return current_zone().autonomy_service", "client.active_sim def active_sim_info(): client = client_manager().get_first_client() if client is not None: return client.active_sim_info", "None _server_clock_service = None _persistence_service = None _terrain_service = None _distributor_service = None", "current_zone() if zone is not None: return zone.daycare_service def get_adoption_service(): return current_zone().adoption_service def", "from intern_service import InternService from server.account_service import AccountService from 
services.persistence_service import PersistenceService from", "def time_service(): return game_services.service_manager.time_service def game_clock_service(): return game_services.service_manager.game_clock def server_clock_service(): if _server_clock_service is", "not None: return zone.inventory_manager return return _zone_manager.get(zone_id).inventory_manager def prop_manager(zone_id=None): if zone_id is None:", "= game_services.service_manager.business_service return bs def get_terrain_service(): global _terrain_service if _terrain_service is None: from", "pass @staticmethod def get_building_type(*_, **__): return 0 @staticmethod def get_eco_footprint_value(*_, **__): return 0", "= None _zone_manager = None _server_clock_service = None _persistence_service = None _distributor_service =", "is not None: return zone.object_manager def inventory_manager(zone_id=None): if zone_id is None: zone =", "return _zone_manager.get(zone_id).posture_graph_service def sim_spawner_service(zone_id=None): if zone_id is None: return current_zone().sim_spawner_service return _zone_manager.get(zone_id).sim_spawner_service def", "= _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker = _zone.get_hide_from_lot_picker with sims4.reload.protected(globals()): tuning_managers = InstanceTuningManagers()", "def travel_service(): return current_zone().travel_service def sim_quadtree(): return current_zone().sim_quadtree def single_part_condition_list(): return current_zone().single_part_condition_list def", "sims4.reload import sims4.service_manager try: import _zone except ImportError: class _zone: @staticmethod def invite_sims_to_zone(*_,", "_zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker = _zone.get_hide_from_lot_picker with sims4.reload.protected(globals()): tuning_managers = InstanceTuningManagers() 
get_instance_manager = tuning_managers.__getitem__ _account_service", "_server_clock_service import clock _server_clock_service = clock.ServerClock(ticks=initial_ticks) def get_master_controller(): return current_zone().master_controller def get_persistence_service(): global", "current_zone() if zone is not None and hasattr(zone, 'object_routing_service'): return zone.object_routing_service def get_landlord_service():", "_zone.get_world_id get_world_and_lot_description_id_from_zone_id = _zone.get_world_and_lot_description_id_from_zone_id get_is_eco_footprint_compatible_for_world_description = _zone.get_is_eco_footprint_compatible_for_world_description get_hide_from_lot_picker = _zone.get_hide_from_lot_picker with sims4.reload.protected(globals()): tuning_managers", "InstanceTuningManagers from sims4.resources import INSTANCE_TUNING_DEFINITIONS from sims4.tuning.instance_manager import TuningInstanceManager from sims4.tuning.tunable import Tunable,", "if _zone_manager is not None: return _zone_manager.current_zone def current_zone_id(): if _zone_manager is not", "= None _event_manager = None _server_clock_service = None _persistence_service = None _terrain_service =", "@staticmethod def get_rent(*_, **__): return 0 @staticmethod def get_lot_description_id(*_, **__): pass @staticmethod def", "social_group_cluster_service(): return current_zone().social_group_cluster_service def on_client_connect(client): sims4.core_services.service_manager.on_client_connect(client) game_services.service_manager.on_client_connect(client) current_zone().service_manager.on_client_connect(client) def on_client_disconnect(client): sims4.core_services.service_manager.on_client_disconnect(client) if game_services.service_manager.allow_shutdown:", "None: return client.active_sim def active_sim_info(): client = client_manager().get_first_client() if client is not None:", "def conditional_layer_service(): return current_zone().conditional_layer_service def get_sickness_service(): return 
game_services.service_manager.sickness_service def get_curfew_service(): return game_services.service_manager.curfew_service def", "not household.is_active_household: return return household.object_preference_tracker travel_group = travel_group_manager().get_travel_group_by_zone_id(zone.id) if travel_group is not None:", "current_zone().zone_modifier_service def get_demographics_service(): return current_zone().demographics_service def get_service_npc_service(): return current_zone().service_npc_service def conditional_layer_service(): return current_zone().conditional_layer_service", "active_lot_id(): lot = active_lot() if lot is not None: return lot.lot_id def client_object_managers():", "return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service(): return current_zone().ui_dialog_service def config_service(): return", "= client_manager().get_first_client() if client is not None: return client.household_id def active_household_lot_id(): household =", "get_building_type(*_, **__): return 0 @staticmethod def get_eco_footprint_value(*_, **__): return 0 @staticmethod def get_rent(*_,", "get_utilities_manager_by_household_id(household_id) return get_utilities_manager_by_zone_id(current_zone_id()) def get_utilities_manager_by_household_id(household_id): return game_services.service_manager.utilities_manager.get_manager_for_household(household_id) def get_utilities_manager_by_zone_id(zone_id): return game_services.service_manager.utilities_manager.get_manager_for_zone(zone_id) def ui_dialog_service():", "[server_clock_service(), get_persistence_service()] services = [_distributor_service, _intern_service, _intern_service.get_start_interning(), TimeStampService] instantiated_tuning_managers = [] for definition", "getattr(game_services.service_manager, 'global_policy_service', None) 
def narrative_service(): return getattr(game_services.service_manager, 'narrative_service', None) def organization_service(): return getattr(game_services.service_manager,", "relationship_service(): return game_services.service_manager.relationship_service def hidden_sim_service(): return game_services.service_manager.hidden_sim_service def weather_service(): return getattr(game_services.service_manager, 'weather_service', None)", "start at {}'.format(time_stamp)) if time_delta is None: time_delta = time_stamp else: time_delta =", "None: return zone.street def get_zone(zone_id, allow_uninstantiated_zones=False): if _zone_manager is not None: return _zone_manager.get(zone_id,", "not None: household = household_manager().get(zone.lot.owner_household_id) if household is not None: if require_active_household and", "def get_plex_service(): return current_zone().plex_service def get_door_service(): return current_zone().door_service def get_zone_modifier_service(): return current_zone().zone_modifier_service def", "return game_services.service_manager.tutorial_service def calendar_service(): return current_zone().calendar_service def get_rabbit_hole_service(): return game_services.service_manager.rabbit_hole_service def holiday_service(): return", "neighborhood_population_service(): return current_zone().neighborhood_population_service def get_reset_and_delete_service(): return current_zone().reset_and_delete_service def venue_service(): return current_zone().venue_service def venue_game_service():" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "import torch.nn as nn import torch.nn.functional as f import torch from . import", "mask): x = x.transpose(1, 2) # B x C x L mask =", "Conv1d class Encoder(nn.Module): def __init__(self, args, input_size): super().__init__() self.dropout = args.dropout self.encoders =", "2) # B x C x L mask = mask.transpose(1, 2) for i,", "as encoder import torch.nn as nn import torch.nn.functional as f import torch from", "KIND, either express or implied. # See the License for the specific language", "under the License. # add BiLSTM as encoder import torch.nn as nn import", "Unless required by applicable law or agreed to in writing, software # distributed", "BiLSTM as encoder import torch.nn as nn import torch.nn.functional as f import torch", "self.encoders = nn.ModuleList([Conv1d( in_channels=input_size if i == 0 else args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for", "(C) 2019 Alibaba Group Holding Limited # # Licensed under the Apache License,", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "License. # You may obtain a copy of the License at # #", "as nn import torch.nn.functional as f import torch from . 
import Conv1d class", "descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return sorted_batch, sorted_seq_lens, sorting_index, restoration_index class Seq2SeqEncoder(nn.Module): def", "from torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__() self.rnn_type = rnn_type self.input_size = input_size self.hidden_size = hidden_size", "sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return sorted_batch, sorted_seq_lens, sorting_index, restoration_index class Seq2SeqEncoder(nn.Module):", "law or agreed to in writing, software # distributed under the License is", "num_layers=num_layers, bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional) def forward(self, sequences_batch, sequences_lengths): outputs, _ = self._encoder(sequences_batch,", "the License for the specific language governing permissions and # limitations under the", "hidden_size, num_layers=num_layers, bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional) def forward(self, sequences_batch, sequences_lengths): outputs, _ =", "self._encoder = rnn_type(input_size, hidden_size, num_layers=num_layers, bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional) def forward(self, sequences_batch, sequences_lengths):", "compliance with the License. 
# You may obtain a copy of the License", "nn.ModuleList([Conv1d( in_channels=input_size if i == 0 else args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for i in", "dropout=0.2, bidirectional=False): assert issubclass(rnn_type, nn.RNNBase),\\ \"rnn_type must be a class inheriting from torch.nn.RNNBase\"", "idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping = sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping)", "self.dropout = dropout self.bidirectional = bidirectional self._encoder = rnn_type(input_size, hidden_size, num_layers=num_layers, bias=bias, batch_first=True,", "mask.transpose(1, 2) for i, encoder in enumerate(self.encoders): x.masked_fill_(~mask, 0.) if i > 0:", "= rnn_type(input_size, hidden_size, num_layers=num_layers, bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional) def forward(self, sequences_batch, sequences_lengths): outputs,", "= nn.ModuleList([Conv1d( in_channels=input_size if i == 0 else args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for i", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "0.) if i > 0: x = f.dropout(x, self.dropout, self.training) x = encoder(x)", "this file except in compliance with the License. 
# You may obtain a", "Limited # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "in_channels=input_size if i == 0 else args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)])", "x.transpose(1, 2) # B x L x C def sort_by_seq_lens(batch, sequences_lengths, descending=True): sorted_seq_lens,", "num_layers=1, bias=True, dropout=0.2, bidirectional=False): assert issubclass(rnn_type, nn.RNNBase),\\ \"rnn_type must be a class inheriting", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "x.transpose(1, 2) # B x C x L mask = mask.transpose(1, 2) for", "encoder import torch.nn as nn import torch.nn.functional as f import torch from .", "you may not use this file except in compliance with the License. #", "= rnn_type self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias =", "bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional) def forward(self, sequences_batch, sequences_lengths): outputs, _ = self._encoder(sequences_batch, None)", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping = sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return sorted_batch, sorted_seq_lens,", "reverse_mapping) return sorted_batch, sorted_seq_lens, sorting_index, restoration_index class Seq2SeqEncoder(nn.Module): def __init__(self, rnn_type, input_size, hidden_size,", "== 0 else args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)]) def forward(self, x,", "sorting_index =\\ sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _,", "i in range(args.enc_layers)]) 
def forward(self, x, mask): x = x.transpose(1, 2) # B", "num_layers self.bias = bias self.dropout = dropout self.bidirectional = bidirectional self._encoder = rnn_type(input_size,", "bias self.dropout = dropout self.bidirectional = bidirectional self._encoder = rnn_type(input_size, hidden_size, num_layers=num_layers, bias=bias,", "descending=True): sorted_seq_lens, sorting_index =\\ sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range = torch.arange(0,", "ANY KIND, either express or implied. # See the License for the specific", "torch from . import Conv1d class Encoder(nn.Module): def __init__(self, args, input_size): super().__init__() self.dropout", "self.dropout, self.training) x = encoder(x) x = f.dropout(x, self.dropout, self.training) return x.transpose(1, 2)", "x L x C def sort_by_seq_lens(batch, sequences_lengths, descending=True): sorted_seq_lens, sorting_index =\\ sequences_lengths.sort(0, descending=descending)", "the License. # add BiLSTM as encoder import torch.nn as nn import torch.nn.functional", "= args.dropout self.encoders = nn.ModuleList([Conv1d( in_channels=input_size if i == 0 else args.hidden_size, out_channels=args.hidden_size,", "in compliance with the License. # You may obtain a copy of the", ". import Conv1d class Encoder(nn.Module): def __init__(self, args, input_size): super().__init__() self.dropout = args.dropout", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# add BiLSTM as encoder import torch.nn as nn import torch.nn.functional as f", "C x L mask = mask.transpose(1, 2) for i, encoder in enumerate(self.encoders): x.masked_fill_(~mask,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. 
# You may obtain", "x = x.transpose(1, 2) # B x C x L mask = mask.transpose(1,", "L x C def sort_by_seq_lens(batch, sequences_lengths, descending=True): sorted_seq_lens, sorting_index =\\ sequences_lengths.sort(0, descending=descending) sorted_batch", "sorting_index) idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping = sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "= sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return sorted_batch, sorted_seq_lens, sorting_index, restoration_index class", "i, encoder in enumerate(self.encoders): x.masked_fill_(~mask, 0.) if i > 0: x = f.dropout(x,", "sorting_index, restoration_index class Seq2SeqEncoder(nn.Module): def __init__(self, rnn_type, input_size, hidden_size, num_layers=1, bias=True, dropout=0.2, bidirectional=False):", "not use this file except in compliance with the License. # You may", "self.num_layers = num_layers self.bias = bias self.dropout = dropout self.bidirectional = bidirectional self._encoder", "= bias self.dropout = dropout self.bidirectional = bidirectional self._encoder = rnn_type(input_size, hidden_size, num_layers=num_layers,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)]) def forward(self, x, mask): x = x.transpose(1,", "\"rnn_type must be a class inheriting from torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__() self.rnn_type = rnn_type", "C def sort_by_seq_lens(batch, sequences_lengths, descending=True): sorted_seq_lens, sorting_index =\\ sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0,", "See the License for the specific language governing permissions and # limitations under", "sorted_batch, sorted_seq_lens, sorting_index, restoration_index class Seq2SeqEncoder(nn.Module): def __init__(self, rnn_type, input_size, hidden_size, num_layers=1, bias=True,", "bidirectional self._encoder = rnn_type(input_size, hidden_size, num_layers=num_layers, bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional) def forward(self, sequences_batch,", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "return x.transpose(1, 2) # B x L x C def sort_by_seq_lens(batch, sequences_lengths, descending=True):", "License, Version 2.0 (the \"License\"); # you may not use this file except", "= mask.transpose(1, 2) for i, encoder in enumerate(self.encoders): x.masked_fill_(~mask, 0.) if i >", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "limitations under the License. # add BiLSTM as encoder import torch.nn as nn", "for i, encoder in enumerate(self.encoders): x.masked_fill_(~mask, 0.) if i > 0: x =", "permissions and # limitations under the License. 
# add BiLSTM as encoder import", "i > 0: x = f.dropout(x, self.dropout, self.training) x = encoder(x) x =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "= idx_range.index_select(0, reverse_mapping) return sorted_batch, sorted_seq_lens, sorting_index, restoration_index class Seq2SeqEncoder(nn.Module): def __init__(self, rnn_type,", "class Seq2SeqEncoder(nn.Module): def __init__(self, rnn_type, input_size, hidden_size, num_layers=1, bias=True, dropout=0.2, bidirectional=False): assert issubclass(rnn_type,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "restoration_index = idx_range.index_select(0, reverse_mapping) return sorted_batch, sorted_seq_lens, sorting_index, restoration_index class Seq2SeqEncoder(nn.Module): def __init__(self,", "governing permissions and # limitations under the License. # add BiLSTM as encoder", "self.training) return x.transpose(1, 2) # B x L x C def sort_by_seq_lens(batch, sequences_lengths,", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "import torch.nn.functional as f import torch from . import Conv1d class Encoder(nn.Module): def", "in range(args.enc_layers)]) def forward(self, x, mask): x = x.transpose(1, 2) # B x", "sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "for i in range(args.enc_layers)]) def forward(self, x, mask): x = x.transpose(1, 2) #", "0: x = f.dropout(x, self.dropout, self.training) x = encoder(x) x = f.dropout(x, self.dropout,", "restoration_index class Seq2SeqEncoder(nn.Module): def __init__(self, rnn_type, input_size, hidden_size, num_layers=1, bias=True, dropout=0.2, bidirectional=False): assert", "OF ANY KIND, either express or implied. 
# See the License for the", "Seq2SeqEncoder(nn.Module): def __init__(self, rnn_type, input_size, hidden_size, num_layers=1, bias=True, dropout=0.2, bidirectional=False): assert issubclass(rnn_type, nn.RNNBase),\\", "# Copyright (C) 2019 Alibaba Group Holding Limited # # Licensed under the", "2.0 (the \"License\"); # you may not use this file except in compliance", "Encoder(nn.Module): def __init__(self, args, input_size): super().__init__() self.dropout = args.dropout self.encoders = nn.ModuleList([Conv1d( in_channels=input_size", "=\\ sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping", "x = encoder(x) x = f.dropout(x, self.dropout, self.training) return x.transpose(1, 2) # B", "sequences_lengths, descending=True): sorted_seq_lens, sorting_index =\\ sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range =", "# you may not use this file except in compliance with the License.", "self.dropout = args.dropout self.encoders = nn.ModuleList([Conv1d( in_channels=input_size if i == 0 else args.hidden_size,", "if i > 0: x = f.dropout(x, self.dropout, self.training) x = encoder(x) x", "nn.RNNBase),\\ \"rnn_type must be a class inheriting from torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__() self.rnn_type =", "as f import torch from . import Conv1d class Encoder(nn.Module): def __init__(self, args,", "encoder in enumerate(self.encoders): x.masked_fill_(~mask, 0.) if i > 0: x = f.dropout(x, self.dropout,", "2) # B x L x C def sort_by_seq_lens(batch, sequences_lengths, descending=True): sorted_seq_lens, sorting_index", "must be a class inheriting from torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__() self.rnn_type = rnn_type self.input_size", "for the specific language governing permissions and # limitations under the License. 
#", "agreed to in writing, software # distributed under the License is distributed on", "hidden_size, num_layers=1, bias=True, dropout=0.2, bidirectional=False): assert issubclass(rnn_type, nn.RNNBase),\\ \"rnn_type must be a class", "= bidirectional self._encoder = rnn_type(input_size, hidden_size, num_layers=num_layers, bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional) def forward(self,", "self.bidirectional = bidirectional self._encoder = rnn_type(input_size, hidden_size, num_layers=num_layers, bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional) def", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "mask = mask.transpose(1, 2) for i, encoder in enumerate(self.encoders): x.masked_fill_(~mask, 0.) if i", "dropout=dropout, bidirectional=bidirectional) def forward(self, sequences_batch, sequences_lengths): outputs, _ = self._encoder(sequences_batch, None) return outputs", "x, mask): x = x.transpose(1, 2) # B x C x L mask", "reverse_mapping = sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return sorted_batch, sorted_seq_lens, sorting_index, restoration_index", "(the \"License\"); # you may not use this file except in compliance with", "idx_range.index_select(0, reverse_mapping) return sorted_batch, sorted_seq_lens, sorting_index, restoration_index class Seq2SeqEncoder(nn.Module): def __init__(self, rnn_type, input_size,", "x C def sort_by_seq_lens(batch, sequences_lengths, descending=True): sorted_seq_lens, sorting_index =\\ sequences_lengths.sort(0, descending=descending) sorted_batch =", "args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)]) def forward(self, x, mask): x =", "x C x L mask = mask.transpose(1, 2) for i, encoder in enumerate(self.encoders):", "rnn_type self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias", 
"# # Unless required by applicable law or agreed to in writing, software", "from . import Conv1d class Encoder(nn.Module): def __init__(self, args, input_size): super().__init__() self.dropout =", "Group Holding Limited # # Licensed under the Apache License, Version 2.0 (the", "= f.dropout(x, self.dropout, self.training) x = encoder(x) x = f.dropout(x, self.dropout, self.training) return", "Copyright (C) 2019 Alibaba Group Holding Limited # # Licensed under the Apache", "B x L x C def sort_by_seq_lens(batch, sequences_lengths, descending=True): sorted_seq_lens, sorting_index =\\ sequences_lengths.sort(0,", "express or implied. # See the License for the specific language governing permissions", "assert issubclass(rnn_type, nn.RNNBase),\\ \"rnn_type must be a class inheriting from torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__()", "encoder(x) x = f.dropout(x, self.dropout, self.training) return x.transpose(1, 2) # B x L", "a class inheriting from torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__() self.rnn_type = rnn_type self.input_size = input_size", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "and # limitations under the License. # add BiLSTM as encoder import torch.nn", "except in compliance with the License. 
# You may obtain a copy of", "sort_by_seq_lens(batch, sequences_lengths, descending=True): sorted_seq_lens, sorting_index =\\ sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range", "input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.dropout = dropout", "be a class inheriting from torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__() self.rnn_type = rnn_type self.input_size =", "by applicable law or agreed to in writing, software # distributed under the", "= x.transpose(1, 2) # B x C x L mask = mask.transpose(1, 2)", "hidden_size self.num_layers = num_layers self.bias = bias self.dropout = dropout self.bidirectional = bidirectional", "Alibaba Group Holding Limited # # Licensed under the Apache License, Version 2.0", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "def __init__(self, args, input_size): super().__init__() self.dropout = args.dropout self.encoders = nn.ModuleList([Conv1d( in_channels=input_size if", "x = f.dropout(x, self.dropout, self.training) x = encoder(x) x = f.dropout(x, self.dropout, self.training)", "def __init__(self, rnn_type, input_size, hidden_size, num_layers=1, bias=True, dropout=0.2, bidirectional=False): assert issubclass(rnn_type, nn.RNNBase),\\ \"rnn_type", "either express or implied. # See the License for the specific language governing", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "torch.nn as nn import torch.nn.functional as f import torch from . import Conv1d", "= encoder(x) x = f.dropout(x, self.dropout, self.training) return x.transpose(1, 2) # B x", "in enumerate(self.encoders): x.masked_fill_(~mask, 0.) 
if i > 0: x = f.dropout(x, self.dropout, self.training)", "self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.dropout", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "bias=True, dropout=0.2, bidirectional=False): assert issubclass(rnn_type, nn.RNNBase),\\ \"rnn_type must be a class inheriting from", "dropout self.bidirectional = bidirectional self._encoder = rnn_type(input_size, hidden_size, num_layers=num_layers, bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional)", "if i == 0 else args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)]) def", "super().__init__() self.dropout = args.dropout self.encoders = nn.ModuleList([Conv1d( in_channels=input_size if i == 0 else", "= f.dropout(x, self.dropout, self.training) return x.transpose(1, 2) # B x L x C", "= torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping = sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "coding=utf-8 # Copyright (C) 2019 Alibaba Group Holding Limited # # Licensed under", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "args, input_size): super().__init__() self.dropout = args.dropout self.encoders = nn.ModuleList([Conv1d( in_channels=input_size if i ==", "bidirectional=False): assert issubclass(rnn_type, nn.RNNBase),\\ \"rnn_type must be a class inheriting from torch.nn.RNNBase\" super(Seq2SeqEncoder,", "# B x C x L mask = mask.transpose(1, 2) for i, 
encoder", "nn import torch.nn.functional as f import torch from . import Conv1d class Encoder(nn.Module):", "args.dropout self.encoders = nn.ModuleList([Conv1d( in_channels=input_size if i == 0 else args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes)", "language governing permissions and # limitations under the License. # add BiLSTM as", "file except in compliance with the License. # You may obtain a copy", "the specific language governing permissions and # limitations under the License. # add", "Holding Limited # # Licensed under the Apache License, Version 2.0 (the \"License\");", "x = f.dropout(x, self.dropout, self.training) return x.transpose(1, 2) # B x L x", "sorted_seq_lens, sorting_index =\\ sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device)", "else args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)]) def forward(self, x, mask): x", "__init__(self, rnn_type, input_size, hidden_size, num_layers=1, bias=True, dropout=0.2, bidirectional=False): assert issubclass(rnn_type, nn.RNNBase),\\ \"rnn_type must", "issubclass(rnn_type, nn.RNNBase),\\ \"rnn_type must be a class inheriting from torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__() self.rnn_type", "# limitations under the License. # add BiLSTM as encoder import torch.nn as", "descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping = sorting_index.sort(0,", "return sorted_batch, sorted_seq_lens, sorting_index, restoration_index class Seq2SeqEncoder(nn.Module): def __init__(self, rnn_type, input_size, hidden_size, num_layers=1,", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "import torch from . 
import Conv1d class Encoder(nn.Module): def __init__(self, args, input_size): super().__init__()", "range(args.enc_layers)]) def forward(self, x, mask): x = x.transpose(1, 2) # B x C", "License for the specific language governing permissions and # limitations under the License.", "B x C x L mask = mask.transpose(1, 2) for i, encoder in", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "f.dropout(x, self.dropout, self.training) x = encoder(x) x = f.dropout(x, self.dropout, self.training) return x.transpose(1,", "rnn_type(input_size, hidden_size, num_layers=num_layers, bias=bias, batch_first=True, dropout=dropout, bidirectional=bidirectional) def forward(self, sequences_batch, sequences_lengths): outputs, _", "i == 0 else args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)]) def forward(self,", "= batch.index_select(0, sorting_index) idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping = sorting_index.sort(0, descending=False) restoration_index", "the License. 
# You may obtain a copy of the License at #", "batch.index_select(0, sorting_index) idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping = sorting_index.sort(0, descending=False) restoration_index =", "rnn_type, input_size, hidden_size, num_layers=1, bias=True, dropout=0.2, bidirectional=False): assert issubclass(rnn_type, nn.RNNBase),\\ \"rnn_type must be", "torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__() self.rnn_type = rnn_type self.input_size = input_size self.hidden_size = hidden_size self.num_layers", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "> 0: x = f.dropout(x, self.dropout, self.training) x = encoder(x) x = f.dropout(x,", "def forward(self, x, mask): x = x.transpose(1, 2) # B x C x", "class inheriting from torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__() self.rnn_type = rnn_type self.input_size = input_size self.hidden_size", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "# B x L x C def sort_by_seq_lens(batch, sequences_lengths, descending=True): sorted_seq_lens, sorting_index =\\", "implied. # See the License for the specific language governing permissions and #", "specific language governing permissions and # limitations under the License. 
# add BiLSTM", "sorted_batch = batch.index_select(0, sorting_index) idx_range = torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping = sorting_index.sort(0, descending=False)", "input_size): super().__init__() self.dropout = args.dropout self.encoders = nn.ModuleList([Conv1d( in_channels=input_size if i == 0", "\"License\"); # you may not use this file except in compliance with the", "import Conv1d class Encoder(nn.Module): def __init__(self, args, input_size): super().__init__() self.dropout = args.dropout self.encoders", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "0 else args.hidden_size, out_channels=args.hidden_size, kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)]) def forward(self, x, mask):", "required by applicable law or agreed to in writing, software # distributed under", "x L mask = mask.transpose(1, 2) for i, encoder in enumerate(self.encoders): x.masked_fill_(~mask, 0.)", "batch_first=True, dropout=dropout, bidirectional=bidirectional) def forward(self, sequences_batch, sequences_lengths): outputs, _ = self._encoder(sequences_batch, None) return", "self.training) x = encoder(x) x = f.dropout(x, self.dropout, self.training) return x.transpose(1, 2) #", "= input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.dropout =", "= num_layers self.bias = bias self.dropout = dropout self.bidirectional = bidirectional self._encoder =", "def sort_by_seq_lens(batch, sequences_lengths, descending=True): sorted_seq_lens, sorting_index =\\ sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index)", "applicable law or agreed to in writing, software # distributed under the License", "input_size, hidden_size, num_layers=1, bias=True, dropout=0.2, bidirectional=False): assert issubclass(rnn_type, nn.RNNBase),\\ 
\"rnn_type must be a", "super(Seq2SeqEncoder, self).__init__() self.rnn_type = rnn_type self.input_size = input_size self.hidden_size = hidden_size self.num_layers =", "# coding=utf-8 # Copyright (C) 2019 Alibaba Group Holding Limited # # Licensed", "L mask = mask.transpose(1, 2) for i, encoder in enumerate(self.encoders): x.masked_fill_(~mask, 0.) if", "self).__init__() self.rnn_type = rnn_type self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers", "self.rnn_type = rnn_type self.input_size = input_size self.hidden_size = hidden_size self.num_layers = num_layers self.bias", "__init__(self, args, input_size): super().__init__() self.dropout = args.dropout self.encoders = nn.ModuleList([Conv1d( in_channels=input_size if i", "f import torch from . import Conv1d class Encoder(nn.Module): def __init__(self, args, input_size):", "self.bias = bias self.dropout = dropout self.bidirectional = bidirectional self._encoder = rnn_type(input_size, hidden_size,", "= dropout self.bidirectional = bidirectional self._encoder = rnn_type(input_size, hidden_size, num_layers=num_layers, bias=bias, batch_first=True, dropout=dropout,", "sorted_seq_lens, sorting_index, restoration_index class Seq2SeqEncoder(nn.Module): def __init__(self, rnn_type, input_size, hidden_size, num_layers=1, bias=True, dropout=0.2,", "torch.arange(0, len(sequences_lengths)).to(sequences_lengths.device) _, reverse_mapping = sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return sorted_batch,", "or agreed to in writing, software # distributed under the License is distributed", "or implied. # See the License for the specific language governing permissions and", "= hidden_size self.num_layers = num_layers self.bias = bias self.dropout = dropout self.bidirectional =", "f.dropout(x, self.dropout, self.training) return x.transpose(1, 2) # B x L x C def", "License. 
# add BiLSTM as encoder import torch.nn as nn import torch.nn.functional as", "self.hidden_size = hidden_size self.num_layers = num_layers self.bias = bias self.dropout = dropout self.bidirectional", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "x.masked_fill_(~mask, 0.) if i > 0: x = f.dropout(x, self.dropout, self.training) x =", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "class Encoder(nn.Module): def __init__(self, args, input_size): super().__init__() self.dropout = args.dropout self.encoders = nn.ModuleList([Conv1d(", "forward(self, x, mask): x = x.transpose(1, 2) # B x C x L", "_, reverse_mapping = sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return sorted_batch, sorted_seq_lens, sorting_index,", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "torch.nn.functional as f import torch from . import Conv1d class Encoder(nn.Module): def __init__(self,", "enumerate(self.encoders): x.masked_fill_(~mask, 0.) if i > 0: x = f.dropout(x, self.dropout, self.training) x", "self.dropout, self.training) return x.transpose(1, 2) # B x L x C def sort_by_seq_lens(batch,", "inheriting from torch.nn.RNNBase\" super(Seq2SeqEncoder, self).__init__() self.rnn_type = rnn_type self.input_size = input_size self.hidden_size =", "kernel_sizes=args.kernel_sizes) for i in range(args.enc_layers)]) def forward(self, x, mask): x = x.transpose(1, 2)", "2) for i, encoder in enumerate(self.encoders): x.masked_fill_(~mask, 0.) if i > 0: x", "with the License. 
# You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "add BiLSTM as encoder import torch.nn as nn import torch.nn.functional as f import", "2019 Alibaba Group Holding Limited # # Licensed under the Apache License, Version", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "ser limitados a 72 # caracteres por linha instancia = MinhaClasse() print(instancia.descricao) instancia.descricao", "# duas linhas em branco separam classes de outras funções class MinhaClasse(): def", "em branco entre métodos def meu_metodo(self, arg1): pass def main(): # comentários que", "# caracteres por linha instancia = MinhaClasse() print(instancia.descricao) instancia.descricao = \"Classe da Jess\"", "def main(): # comentários que usam mais de uma linha, devem ser limitados", "deve ter a sua própria linha import sys import os # duas linhas", "classe, usamos uma linha em branco entre métodos def meu_metodo(self, arg1): pass def", "linha em branco entre métodos def meu_metodo(self, arg1): pass def main(): # comentários", "que usam mais de uma linha, devem ser limitados a 72 # caracteres", "= \"Minha Classe\" # dentro de uma classe, usamos uma linha em branco", "entre métodos def meu_metodo(self, arg1): pass def main(): # comentários que usam mais", "meu_metodo(self, arg1): pass def main(): # comentários que usam mais de uma linha,", "linha, devem ser limitados a 72 # caracteres por linha instancia = MinhaClasse()", "a 72 # caracteres por linha instancia = MinhaClasse() print(instancia.descricao) instancia.descricao = \"Classe", "class MinhaClasse(): def __init__(self): self.descricao = \"Minha Classe\" # dentro de uma classe,", "MinhaClasse() print(instancia.descricao) instancia.descricao = \"Classe da Jess\" print(instancia.descricao) if __name__ == \"__main__\": main()", "branco separam classes de outras funções class MinhaClasse(): def __init__(self): self.descricao = \"Minha", "sua própria linha import sys import os # duas linhas em branco separam", "de uma classe, usamos uma linha em branco entre métodos def meu_metodo(self, arg1):", "# comentários que usam mais de uma linha, devem ser limitados a 72", "\"Minha Classe\" # dentro de uma classe, usamos uma linha em branco entre", "própria linha import sys import os # duas linhas em branco separam 
classes", "def meu_metodo(self, arg1): pass def main(): # comentários que usam mais de uma", "import deve ter a sua própria linha import sys import os # duas", "classes de outras funções class MinhaClasse(): def __init__(self): self.descricao = \"Minha Classe\" #", "linhas em branco separam classes de outras funções class MinhaClasse(): def __init__(self): self.descricao", "self.descricao = \"Minha Classe\" # dentro de uma classe, usamos uma linha em", "__init__(self): self.descricao = \"Minha Classe\" # dentro de uma classe, usamos uma linha", "arg1): pass def main(): # comentários que usam mais de uma linha, devem", "mais de uma linha, devem ser limitados a 72 # caracteres por linha", "por linha instancia = MinhaClasse() print(instancia.descricao) instancia.descricao = \"Classe da Jess\" print(instancia.descricao) if", "separam classes de outras funções class MinhaClasse(): def __init__(self): self.descricao = \"Minha Classe\"", "dentro de uma classe, usamos uma linha em branco entre métodos def meu_metodo(self,", "# cada import deve ter a sua própria linha import sys import os", "de outras funções class MinhaClasse(): def __init__(self): self.descricao = \"Minha Classe\" # dentro", "sys import os # duas linhas em branco separam classes de outras funções", "uma classe, usamos uma linha em branco entre métodos def meu_metodo(self, arg1): pass", "import os # duas linhas em branco separam classes de outras funções class", "uma linha em branco entre métodos def meu_metodo(self, arg1): pass def main(): #", "em branco separam classes de outras funções class MinhaClasse(): def __init__(self): self.descricao =", "cada import deve ter a sua própria linha import sys import os #", "usamos uma linha em branco entre métodos def meu_metodo(self, arg1): pass def main():", "main(): # comentários que usam mais de uma linha, devem ser limitados a", "usam mais de uma linha, devem ser limitados a 72 # caracteres por", "caracteres por linha instancia = MinhaClasse() 
print(instancia.descricao) instancia.descricao = \"Classe da Jess\" print(instancia.descricao)", "de uma linha, devem ser limitados a 72 # caracteres por linha instancia", "outras funções class MinhaClasse(): def __init__(self): self.descricao = \"Minha Classe\" # dentro de", "MinhaClasse(): def __init__(self): self.descricao = \"Minha Classe\" # dentro de uma classe, usamos", "branco entre métodos def meu_metodo(self, arg1): pass def main(): # comentários que usam", "métodos def meu_metodo(self, arg1): pass def main(): # comentários que usam mais de", "comentários que usam mais de uma linha, devem ser limitados a 72 #", "funções class MinhaClasse(): def __init__(self): self.descricao = \"Minha Classe\" # dentro de uma", "uma linha, devem ser limitados a 72 # caracteres por linha instancia =", "def __init__(self): self.descricao = \"Minha Classe\" # dentro de uma classe, usamos uma", "linha instancia = MinhaClasse() print(instancia.descricao) instancia.descricao = \"Classe da Jess\" print(instancia.descricao) if __name__", "instancia = MinhaClasse() print(instancia.descricao) instancia.descricao = \"Classe da Jess\" print(instancia.descricao) if __name__ ==", "= MinhaClasse() print(instancia.descricao) instancia.descricao = \"Classe da Jess\" print(instancia.descricao) if __name__ == \"__main__\":", "72 # caracteres por linha instancia = MinhaClasse() print(instancia.descricao) instancia.descricao = \"Classe da", "linha import sys import os # duas linhas em branco separam classes de", "ter a sua própria linha import sys import os # duas linhas em", "# dentro de uma classe, usamos uma linha em branco entre métodos def", "duas linhas em branco separam classes de outras funções class MinhaClasse(): def __init__(self):", "a sua própria linha import sys import os # duas linhas em branco", "import sys import os # duas linhas em branco separam classes de outras", "os # duas linhas em branco separam classes de outras funções class MinhaClasse():", "devem ser limitados a 72 
# caracteres por linha instancia = MinhaClasse() print(instancia.descricao)", "Classe\" # dentro de uma classe, usamos uma linha em branco entre métodos", "pass def main(): # comentários que usam mais de uma linha, devem ser", "limitados a 72 # caracteres por linha instancia = MinhaClasse() print(instancia.descricao) instancia.descricao =" ]
[]